Merge "Prevent object of AudioEffect be deleted until construction finished" am: 637ee654cf am: 1b201ee707
am: e21535e728
Change-Id: I3a4e538c2e0b89be867f9d145acc0988fb697339
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 4603515..a7ac2d7 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -73,7 +73,7 @@
static bool gMonotonicTime = false; // use system monotonic time for timestamps
static bool gPersistentSurface = false; // use persistent surface
static enum {
- FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
+ FORMAT_MP4, FORMAT_H264, FORMAT_WEBM, FORMAT_3GPP, FORMAT_FRAMES, FORMAT_RAW_FRAMES
} gOutputFormat = FORMAT_MP4; // data format for output
static AString gCodecName = ""; // codec name override
static bool gSizeSpecified = false; // was size explicitly requested?
@@ -669,7 +669,9 @@
sp<MediaMuxer> muxer = NULL;
FILE* rawFp = NULL;
switch (gOutputFormat) {
- case FORMAT_MP4: {
+ case FORMAT_MP4:
+ case FORMAT_WEBM:
+ case FORMAT_3GPP: {
// Configure muxer. We have to wait for the CSD blob from the encoder
// before we can start it.
err = unlink(fileName);
@@ -682,7 +684,13 @@
fprintf(stderr, "ERROR: couldn't open file\n");
abort();
}
- muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ if (gOutputFormat == FORMAT_MP4) {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ } else if (gOutputFormat == FORMAT_WEBM) {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_WEBM);
+ } else {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_THREE_GPP);
+ }
close(fd);
if (gRotate) {
muxer->setOrientationHint(90); // TODO: does this do anything?
@@ -1002,6 +1010,10 @@
gOutputFormat = FORMAT_MP4;
} else if (strcmp(optarg, "h264") == 0) {
gOutputFormat = FORMAT_H264;
+ } else if (strcmp(optarg, "webm") == 0) {
+ gOutputFormat = FORMAT_WEBM;
+ } else if (strcmp(optarg, "3gpp") == 0) {
+ gOutputFormat = FORMAT_3GPP;
} else if (strcmp(optarg, "frames") == 0) {
gOutputFormat = FORMAT_FRAMES;
} else if (strcmp(optarg, "raw-frames") == 0) {
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 61fc897..bddf945 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -78,6 +78,7 @@
static bool gPlaybackAudio;
static bool gWriteMP4;
static bool gDisplayHistogram;
+static bool gVerbose = false;
static bool showProgress = true;
static String8 gWriteMP4Filename;
static String8 gComponentNameOverride;
@@ -159,6 +160,11 @@
break;
}
+ if (gVerbose) {
+ MetaDataBase &meta = mbuf->meta_data();
+ fprintf(stdout, "sample format: %s\n", meta.toString().c_str());
+ }
+
CHECK_EQ(
fwrite((const uint8_t *)mbuf->data() + mbuf->range_offset(),
1,
@@ -348,7 +354,10 @@
decodeTimesUs.push(delayDecodeUs);
}
- if (showProgress && (n++ % 16) == 0) {
+ if (gVerbose) {
+ MetaDataBase &meta = buffer->meta_data();
+ fprintf(stdout, "%ld sample format: %s\n", numFrames, meta.toString().c_str());
+ } else if (showProgress && (n++ % 16) == 0) {
printf(".");
fflush(stdout);
}
@@ -579,12 +588,12 @@
break;
}
+ CHECK(buffer != NULL);
+
if (buffer->range_length() > 0) {
break;
}
- CHECK(buffer != NULL);
-
buffer->release();
buffer = NULL;
}
@@ -630,6 +639,7 @@
fprintf(stderr, " -T allocate buffers from a surface texture\n");
fprintf(stderr, " -d(ump) output_filename (raw stream data to a file)\n");
fprintf(stderr, " -D(ump) output_filename (decoded PCM data to a file)\n");
+ fprintf(stderr, " -v be more verbose\n");
}
static void dumpCodecProfiles(bool queryDecoders) {
@@ -640,7 +650,8 @@
MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
- MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
+ MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+ MEDIA_MIMETYPE_AUDIO_EAC3, MEDIA_MIMETYPE_AUDIO_AC4
};
const char *codecType = queryDecoders? "decoder" : "encoder";
@@ -708,7 +719,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "vhaqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -832,6 +843,12 @@
break;
}
+ case 'v':
+ {
+ gVerbose = true;
+ break;
+ }
+
case '?':
case 'h':
default:
diff --git a/include/media/EventLog.h b/include/media/EventLog.h
new file mode 120000
index 0000000..9b2c4bf
--- /dev/null
+++ b/include/media/EventLog.h
@@ -0,0 +1 @@
+../../media/utils/include/mediautils/EventLog.h
\ No newline at end of file
diff --git a/include/media/TimeCheck.h b/include/media/TimeCheck.h
index e3ef134..85e17f9 120000
--- a/include/media/TimeCheck.h
+++ b/include/media/TimeCheck.h
@@ -1 +1 @@
-../../media/libmedia/include/media/TimeCheck.h
\ No newline at end of file
+../../media/utils/include/mediautils/TimeCheck.h
\ No newline at end of file
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ca119d5..5f19f74 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -538,6 +538,10 @@
mTimestampMutator.push(timestamp);
}
+ virtual ExtendedTimestamp getTimestamp() const {
+ return mTimestampMutator.last();
+ }
+
// Flushes the shared ring buffer if the client had requested it using mStreaming.mFlush.
// If flush occurs then:
// cblk->u.mStreaming.mFront, ServerProxy::mFlush and ServerProxy::mFlushed will be modified
@@ -551,6 +555,9 @@
// Total count of the number of flushed frames since creation (never reset).
virtual int64_t framesFlushed() const { return mFlushed; }
+ // Safe frames ready query with no side effects.
+ virtual size_t framesReadySafe() const = 0;
+
// Get dynamic buffer size from the shared control block.
uint32_t getBufferSizeInFrames() const {
return android_atomic_acquire_load((int32_t *)&mCblk->mBufferSizeInFrames);
@@ -588,8 +595,7 @@
// which may include non-contiguous frames
virtual size_t framesReady();
- // Safe frames ready query used by dump() - this has no side effects.
- virtual size_t framesReadySafe() const;
+ size_t framesReadySafe() const override; // frames available to read by server.
// Currently AudioFlinger will call framesReady() for a fast track from two threads:
// FastMixer thread, and normal mixer thread. This is dangerous, as the proxy is intended
@@ -693,6 +699,8 @@
return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
}
+ size_t framesReadySafe() const override; // frames available to read by client.
+
protected:
virtual ~AudioRecordServerProxy() { }
};
diff --git a/media/OWNERS b/media/OWNERS
index 1f687a2..1e2d123 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -2,8 +2,10 @@
dwkang@google.com
elaurent@google.com
essick@google.com
+gkasten@google.com
hkuang@google.com
hunga@google.com
+jiabin@google.com
jmtrivi@google.com
krocard@google.com
lajos@google.com
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 70c281a..8a97299 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -21,6 +21,9 @@
libsoundtriggerservice \
libutils
+LOCAL_STATIC_LIBRARIES := \
+ libjsoncpp
+
# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
LOCAL_C_INCLUDES := \
frameworks/av/services/audioflinger \
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 1f2e82f..f1e815b 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -7,6 +7,7 @@
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
onrestart restart vendor.audio-hal-2-0
+ onrestart restart vendor.audio-hal-4-0-msd
# Keep the original service name for backward compatibility when upgrading
# O-MR1 devices with framework-only.
onrestart restart audio-hal-2-0
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
new file mode 100644
index 0000000..167d474
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AC4Parser"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include "AC4Parser.h"
+
+#define BOOLSTR(a) ((a)?"true":"false")
+#define BYTE_ALIGN mBitReader.skipBits(mBitReader.numBitsLeft() % 8)
+#define CHECK_BITS_LEFT(n) if (mBitReader.numBitsLeft() < n) {return false;}
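+// CHECK_BITS_LEFT(n) returns false from the enclosing parse function when fewer than n
+// bits remain; BYTE_ALIGN skips numBitsLeft() % 8 bits so parsing resumes on a byte
+// boundary of the DSI buffer.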
+
+namespace android {
+
+AC4Parser::AC4Parser() {
+}
+
+AC4DSIParser::AC4DSIParser(ABitReader &br)
+ : mBitReader(br){
+
+ mDSISize = mBitReader.numBitsLeft();
+}
+
+// ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+static const char *ChannelModes[] = {
+ "mono",
+ "stereo",
+ "3.0",
+ "5.0",
+ "5.1",
+ "7.0 (3/4/0)",
+ "7.1 (3/4/0.1)",
+ "7.0 (5/2/0)",
+ "7.1 (5/2/0.1)",
+ "7.0 (3/2/2)",
+ "7.1 (3/2/2.1)",
+ "7.0.4",
+ "7.1.4",
+ "9.0.4",
+ "9.1.4",
+ "22.2"
+};
+
+static const char* ContentClassifier[] = {
+ "Complete Main",
+ "Music and Effects",
+ "Visually Impaired",
+ "Hearing Impaired",
+ "Dialog",
+ "Commentary",
+ "Emergency",
+ "Voice Over"
+};
+
+bool AC4DSIParser::parseLanguageTag(uint32_t presentationID, uint32_t substreamID){
+ CHECK_BITS_LEFT(6);
+ uint32_t n_language_tag_bytes = mBitReader.getBits(6);
+ if (n_language_tag_bytes < 2 || n_language_tag_bytes >= 42) {
+ return false;
+ }
+ CHECK_BITS_LEFT(n_language_tag_bytes * 8);
+ char language_tag_bytes[42]; // TS 103 190 part 1 4.3.3.8.7
+ for (uint32_t i = 0; i < n_language_tag_bytes; i++) {
+ language_tag_bytes[i] = (char)mBitReader.getBits(8);
+ }
+ language_tag_bytes[n_language_tag_bytes] = 0;
+ ALOGV("%u.%u: language_tag = %s\n", presentationID, substreamID, language_tag_bytes);
+
+ std::string language(language_tag_bytes, n_language_tag_bytes);
+ mPresentations[presentationID].mLanguage = language;
+
+ return true;
+}
+
+// TS 103 190-1 v1.2.1 E.5 and TS 103 190-2 v1.1.1 E.9
+bool AC4DSIParser::parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID){
+ CHECK_BITS_LEFT(5);
+ uint32_t channel_mode = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: channel_mode = %u (%s)\n", presentationID, substreamID, channel_mode,
+ channel_mode < NELEM(ChannelModes) ? ChannelModes[channel_mode] : "reserved");
+ ALOGV("%u.%u: dsi_sf_multiplier = %u\n", presentationID,
+ substreamID, dsi_sf_multiplier);
+ ALOGV("%u.%u: b_substream_bitrate_indicator = %s\n", presentationID,
+ substreamID, BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u: substream_bitrate_indicator = %u\n", presentationID, substreamID,
+ substream_bitrate_indicator);
+ }
+ if (channel_mode >= 7 && channel_mode <= 10) {
+ CHECK_BITS_LEFT(1);
+ uint32_t add_ch_base = mBitReader.getBits(1);
+ ALOGV("%u.%u: add_ch_base = %u\n", presentationID, substreamID, add_ch_base);
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, substreamID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3);
+ ALOGV("%u.%u: content_classifier = %u (%s)\n", presentationID, substreamID,
+ content_classifier, ContentClassifier[content_classifier]);
+
+ // For streams based on TS 103 190 part 1 the presentation level channel_mode doesn't
+ // exist and so we use the channel_mode from either the CM or M&E substream
+ // (they are mutually exclusive)
+ if (mPresentations[presentationID].mChannelMode == -1 &&
+ (content_classifier == 0 || content_classifier == 1)) {
+ mPresentations[presentationID].mChannelMode = channel_mode;
+ }
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, substreamID,
+ BOOLSTR(b_language_indicator));
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, substreamID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+// ETSI TS 103 190-2 v1.1.1 section E.11
+bool AC4DSIParser::parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID)
+{
+ CHECK_BITS_LEFT(1);
+ bool b_substreams_present = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_channel_coded = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(8);
+ uint32_t n_substreams = mBitReader.getBits(8);
+ ALOGV("%u.%u: b_substreams_present = %s\n", presentationID, groupID,
+ BOOLSTR(b_substreams_present));
+ ALOGV("%u.%u: b_hsf_ext = %s\n", presentationID, groupID, BOOLSTR(b_hsf_ext));
+ ALOGV("%u.%u: b_channel_coded = %s\n", presentationID, groupID, BOOLSTR(b_channel_coded));
+ ALOGV("%u.%u: n_substreams = %u\n", presentationID, groupID, n_substreams);
+
+ for (uint32_t i = 0; i < n_substreams; i++) {
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: dsi_sf_multiplier = %u\n", presentationID, groupID, i, dsi_sf_multiplier);
+ ALOGV("%u.%u.%u: b_substream_bitrate_indicator = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u.%u: substream_bitrate_indicator = %u\n", presentationID, groupID, i,
+ substream_bitrate_indicator);
+ }
+ if (b_channel_coded) {
+ CHECK_BITS_LEFT(24);
+ uint32_t dsi_substream_channel_mask = mBitReader.getBits(24);
+ ALOGV("%u.%u.%u: dsi_substream_channel_mask = 0x%06x\n", presentationID, groupID, i,
+ dsi_substream_channel_mask);
+ } else {
+ CHECK_BITS_LEFT(1);
+ bool b_ajoc = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_ajoc = %s\n", presentationID, groupID, i, BOOLSTR(b_ajoc));
+ if (b_ajoc) {
+ CHECK_BITS_LEFT(1);
+ bool b_static_dmx = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_static_dmx = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_static_dmx));
+ if (!b_static_dmx) {
+ CHECK_BITS_LEFT(4);
+ uint32_t n_dmx_objects_minus1 = mBitReader.getBits(4);
+ ALOGV("%u.%u.%u: n_dmx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_dmx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(6);
+ uint32_t n_umx_objects_minus1 = mBitReader.getBits(6);
+ ALOGV("%u.%u.%u: n_umx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_umx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(4);
+ mBitReader.skipBits(4); // objects_assignment_mask
+ }
+ }
+
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, groupID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3);
+ ALOGV("%u.%u: content_classifier = %s (%u)\n", presentationID, groupID,
+ ContentClassifier[content_classifier], content_classifier);
+
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, groupID,
+ BOOLSTR(b_language_indicator));
+
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, groupID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool AC4DSIParser::parseBitrateDsi() {
+ CHECK_BITS_LEFT(2 + 32 + 32);
+ mBitReader.skipBits(2); // bit_rate_mode
+ mBitReader.skipBits(32); // bit_rate
+ mBitReader.skipBits(32); // bit_rate_precision
+
+ return true;
+}
+
+// TS 103 190-1 section E.4 (ac4_dsi) and TS 103 190-2 section E.6 (ac4_dsi_v1)
+bool AC4DSIParser::parse() {
+ CHECK_BITS_LEFT(3);
+ uint32_t ac4_dsi_version = mBitReader.getBits(3);
+ if (ac4_dsi_version > 1) {
+ ALOGE("error while parsing ac-4 dsi: only versions 0 and 1 are supported");
+ return false;
+ }
+
+ CHECK_BITS_LEFT(7 + 1 + 4 + 9);
+ uint32_t bitstream_version = mBitReader.getBits(7);
+ mBitReader.skipBits(1); // fs_index
+ mBitReader.skipBits(4); // frame_rate_index
+ uint32_t n_presentations = mBitReader.getBits(9);
+
+ int32_t short_program_id = -1;
+ if (bitstream_version > 1) {
+ if (ac4_dsi_version == 0){
+ ALOGE("invalid ac4 dsi");
+ return false;
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_program_id = (mBitReader.getBits(1) == 1);
+ if (b_program_id) {
+ CHECK_BITS_LEFT(16 + 1);
+ short_program_id = mBitReader.getBits(16);
+ bool b_uuid = (mBitReader.getBits(1) == 1);
+ if (b_uuid) {
+ const uint32_t kAC4UUIDSizeInBytes = 16;
+ char program_uuid[kAC4UUIDSizeInBytes];
+ CHECK_BITS_LEFT(kAC4UUIDSizeInBytes * 8);
+ for (uint32_t i = 0; i < kAC4UUIDSizeInBytes; i++) {
+ program_uuid[i] = (char)(mBitReader.getBits(8));
+ }
+ ALOGV("UUID = %s", program_uuid);
+ }
+ }
+ }
+
+ if (ac4_dsi_version == 1) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ BYTE_ALIGN;
+ }
+
+ for (uint32_t presentation = 0; presentation < n_presentations; presentation++) {
+ mPresentations[presentation].mProgramID = short_program_id;
+ // known as b_single_substream in ac4_dsi_version 0
+ bool b_single_substream_group = false;
+ uint32_t presentation_config = 0, presentation_version = 0;
+ uint32_t pres_bytes = 0;
+
+ if (ac4_dsi_version == 0) {
+ CHECK_BITS_LEFT(1 + 5 + 5);
+ b_single_substream_group = (mBitReader.getBits(1) == 1);
+ presentation_config = mBitReader.getBits(5);
+ presentation_version = mBitReader.getBits(5);
+ } else if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(8 + 8);
+ presentation_version = mBitReader.getBits(8);
+ pres_bytes = mBitReader.getBits(8);
+ if (pres_bytes == 0xff) {
+ CHECK_BITS_LEFT(16);
+ pres_bytes += mBitReader.getBits(16);
+ }
+ ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
+ if (presentation_version > 1) {
+ CHECK_BITS_LEFT(pres_bytes * 8);
+ mBitReader.skipBits(pres_bytes * 8);
+ continue;
+ }
+ // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
+ // start with a presentation_config of 5 bits
+ CHECK_BITS_LEFT(5);
+ presentation_config = mBitReader.getBits(5);
+ b_single_substream_group = (presentation_config == 0x1f);
+ }
+
+ static const char *PresentationConfig[] = {
+ "Music&Effects + Dialog",
+ "Main + DE",
+ "Main + Associate",
+ "Music&Effects + Dialog + Associate",
+ "Main + DE + Associate",
+ "Arbitrary substream groups",
+ "EMDF only"
+ };
+ ALOGV("%u: b_single_substream/group = %s\n", presentation,
+ BOOLSTR(b_single_substream_group));
+ ALOGV("%u: presentation_version = %u\n", presentation, presentation_version);
+ ALOGV("%u: presentation_config = %u (%s)\n", presentation, presentation_config,
+ (presentation_config >= NELEM(PresentationConfig) ?
+ "reserved" : PresentationConfig[presentation_config]));
+
+ /* record a marker, less the size of the presentation_config */
+ uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
+
+ bool b_add_emdf_substreams = false;
+ if (!b_single_substream_group && presentation_config == 6) {
+ b_add_emdf_substreams = true;
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation, BOOLSTR(b_add_emdf_substreams));
+ } else {
+ CHECK_BITS_LEFT(3 + 1);
+ uint32_t mdcompat = mBitReader.getBits(3);
+ ALOGV("%u: mdcompat = %d\n", presentation, mdcompat);
+
+ bool b_presentation_group_index = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_group_index = %s\n", presentation,
+ BOOLSTR(b_presentation_group_index));
+ if (b_presentation_group_index) {
+ CHECK_BITS_LEFT(5);
+ mPresentations[presentation].mGroupIndex = mBitReader.getBits(5);
+ ALOGV("%u: presentation_group_index = %d\n", presentation,
+ mPresentations[presentation].mGroupIndex);
+ }
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
+ dsi_frame_rate_multiply_info);
+ if (ac4_dsi_version == 1 && presentation_version == 1) {
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
+ dsi_frame_rate_fraction_info);
+ }
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t presentation_emdf_version = mBitReader.getBits(5);
+ uint32_t presentation_key_id = mBitReader.getBits(10);
+ ALOGV("%u: presentation_emdf_version = %d\n", presentation, presentation_emdf_version);
+ ALOGV("%u: presentation_key_id = %d\n", presentation, presentation_key_id);
+
+ if (ac4_dsi_version == 1) {
+ bool b_presentation_channel_coded = false;
+ if (presentation_version == 0) {
+ b_presentation_channel_coded = true;
+ } else {
+ CHECK_BITS_LEFT(1);
+ b_presentation_channel_coded = (mBitReader.getBits(1) == 1);
+ }
+ ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
+ BOOLSTR(b_presentation_channel_coded));
+ if (b_presentation_channel_coded) {
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(5);
+ uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
+ mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
+ ALOGV("%u: dsi_presentation_ch_mode = %d (%s)\n", presentation,
+ dsi_presentation_ch_mode,
+ dsi_presentation_ch_mode < NELEM(ChannelModes) ?
+ ChannelModes[dsi_presentation_ch_mode] : "reserved");
+
+ if (dsi_presentation_ch_mode >= 11 && dsi_presentation_ch_mode <= 14) {
+ CHECK_BITS_LEFT(1 + 2);
+ uint32_t pres_b_4_back_channels_present = mBitReader.getBits(1);
+ uint32_t pres_top_channel_pairs = mBitReader.getBits(2);
+ ALOGV("%u: pres_b_4_back_channels_present = %s\n", presentation,
+ BOOLSTR(pres_b_4_back_channels_present));
+ ALOGV("%u: pres_top_channel_pairs = %d\n", presentation,
+ pres_top_channel_pairs);
+ }
+ }
+ // presentation_channel_mask in ac4_presentation_v0_dsi()
+ CHECK_BITS_LEFT(24);
+ uint32_t presentation_channel_mask_v1 = mBitReader.getBits(24);
+ ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
+ presentation_channel_mask_v1);
+ }
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
+ BOOLSTR(b_presentation_core_differs));
+ if (b_presentation_core_differs) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_channel_coded = (mBitReader.getBits(1) == 1);
+ if (b_presentation_core_channel_coded) {
+ CHECK_BITS_LEFT(2);
+ mBitReader.skipBits(2); // dsi_presentation_channel_mode_core
+ }
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_filter = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_filter = %s\n", presentation,
+ BOOLSTR(b_presentation_filter));
+ if (b_presentation_filter) {
+ CHECK_BITS_LEFT(1 + 8);
+ bool b_enable_presentation = (mBitReader.getBits(1) == 1);
+ if (!b_enable_presentation) {
+ mPresentations[presentation].mEnabled = false;
+ }
+ ALOGV("%u: b_enable_presentation = %s\n", presentation,
+ BOOLSTR(b_enable_presentation));
+ uint32_t n_filter_bytes = mBitReader.getBits(8);
+ CHECK_BITS_LEFT(n_filter_bytes * 8);
+ for (uint32_t i = 0; i < n_filter_bytes; i++) {
+ mBitReader.skipBits(8); // filter_data
+ }
+ }
+ }
+ } /* ac4_dsi_version == 1 */
+
+ if (b_single_substream_group) {
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ }
+ } else {
+ if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_multi_pid = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_multi_pid = %s\n", presentation, BOOLSTR(b_multi_pid));
+ } else {
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_hsf_ext = %s\n", presentation, BOOLSTR(b_hsf_ext));
+ }
+ switch (presentation_config) {
+ case 0:
+ case 1:
+ case 2:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ }
+ break;
+ case 3:
+ case 4:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 2)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 2)) {
+ return false;
+ }
+ }
+ break;
+ case 5:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ CHECK_BITS_LEFT(3);
+ uint32_t n_substream_groups_minus2 = mBitReader.getBits(3);
+ ALOGV("%u: n_substream_groups_minus2 = %d\n", presentation,
+ n_substream_groups_minus2);
+ for (uint32_t sg = 0; sg < n_substream_groups_minus2 + 2; sg++) {
+ if (!parseSubstreamGroupDSI(presentation, sg)) {
+ return false;
+ }
+ }
+ }
+ break;
+ default:
+ CHECK_BITS_LEFT(7);
+ uint32_t n_skip_bytes = mBitReader.getBits(7);
+ CHECK_BITS_LEFT(n_skip_bytes * 8)
+ for (uint32_t j = 0; j < n_skip_bytes; j++) {
+ mBitReader.getBits(8);
+ }
+ break;
+ }
+ CHECK_BITS_LEFT(1 + 1);
+ bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+ mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+ b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+ BOOLSTR(b_add_emdf_substreams));
+ }
+ }
+ if (b_add_emdf_substreams) {
+ CHECK_BITS_LEFT(7);
+ uint32_t n_add_emdf_substreams = mBitReader.getBits(7);
+ for (uint32_t j = 0; j < n_add_emdf_substreams; j++) {
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t substream_emdf_version = mBitReader.getBits(5);
+ uint32_t substream_key_id = mBitReader.getBits(10);
+ ALOGV("%u: emdf_substream[%d]: version=%d, key_id=%d\n", presentation, j,
+ substream_emdf_version, substream_key_id);
+ }
+ }
+
+ bool b_presentation_bitrate_info = false;
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ b_presentation_bitrate_info = (mBitReader.getBits(1) == 1);
+ }
+
+ ALOGV("b_presentation_bitrate_info = %s\n", BOOLSTR(b_presentation_bitrate_info));
+ if (b_presentation_bitrate_info) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ }
+
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ bool b_alternative = (mBitReader.getBits(1) == 1);
+ ALOGV("b_alternative = %s\n", BOOLSTR(b_alternative));
+ if (b_alternative) {
+ BYTE_ALIGN;
+ CHECK_BITS_LEFT(16);
+ uint32_t name_len = mBitReader.getBits(16);
+ CHECK_BITS_LEFT(name_len * 8);
+ char* presentation_name = new char[name_len+1];
+ for (uint32_t i = 0; i < name_len; i++) {
+ presentation_name[i] = (char)(mBitReader.getBits(8));
+ }
+ presentation_name[name_len] = '\0';
+ std::string description(presentation_name, name_len);
+ delete[] presentation_name;
+ mPresentations[presentation].mDescription = description;
+ CHECK_BITS_LEFT(5);
+ uint32_t n_targets = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(n_targets * (3 + 8));
+ for (uint32_t i = 0; i < n_targets; i++){
+ mBitReader.skipBits(3); // target_md_compat
+ mBitReader.skipBits(8); // target_device_category
+ }
+ }
+ }
+
+ BYTE_ALIGN;
+
+ if (ac4_dsi_version == 1) {
+ uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
+ if (mBitReader.numBitsLeft() % 8 != 0) {
+ end += 1;
+ }
+
+ uint64_t presentation_bytes = end - start;
+ uint64_t skip_bytes = pres_bytes - presentation_bytes;
+ ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
+ CHECK_BITS_LEFT(skip_bytes * 8);
+ mBitReader.skipBits(skip_bytes * 8);
+ }
+
+ // we should know this or something is probably wrong
+ // with the bitstream (or we don't support it)
+ if (mPresentations[presentation].mChannelMode == -1){
+ ALOGE("could not determing channel mode of presentation %d", presentation);
+ return false;
+ }
+ } /* each presentation */
+
+ return true;
+}
+
+};
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/AC4Parser.h
new file mode 100644
index 0000000..73b6e31
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AC4_PARSER_H_
+#define AC4_PARSER_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+#include <media/stagefright/foundation/ABitReader.h>
+
+namespace android {
+
+class AC4Parser {
+public:
+ AC4Parser();
+ virtual ~AC4Parser() { }
+
+ virtual bool parse() = 0;
+
+ struct AC4Presentation {
+ int32_t mChannelMode = -1;
+ int32_t mProgramID = -1;
+ int32_t mGroupIndex = -1;
+
+ // TS 103 190-1 v1.2.1 4.3.3.8.1
+ enum ContentClassifiers {
+ kCompleteMain,
+ kMusicAndEffects,
+ kVisuallyImpaired,
+ kHearingImpaired,
+ kDialog,
+ kCommentary,
+ kEmergency,
+ kVoiceOver
+ };
+
+ uint32_t mContentClassifier = kCompleteMain;
+
+ // ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+ enum InputChannelMode {
+ kChannelMode_Mono,
+ kChannelMode_Stereo,
+ kChannelMode_3_0,
+ kChannelMode_5_0,
+ kChannelMode_5_1,
+ kChannelMode_7_0_34,
+ kChannelMode_7_1_34,
+ kChannelMode_7_0_52,
+ kChannelMode_7_1_52,
+ kChannelMode_7_0_322,
+ kChannelMode_7_1_322,
+ kChannelMode_7_0_4,
+ kChannelMode_7_1_4,
+ kChannelMode_9_0_4,
+ kChannelMode_9_1_4,
+ kChannelMode_22_2,
+ kChannelMode_Reserved,
+ };
+
+ bool mHasDialogEnhancements = false;
+ bool mPreVirtualized = false;
+ bool mEnabled = true;
+
+ std::string mLanguage;
+ std::string mDescription;
+ };
+ typedef std::map<uint32_t, AC4Presentation> AC4Presentations;
+
+ const AC4Presentations& getPresentations() const { return mPresentations; }
+
+protected:
+ AC4Presentations mPresentations;
+};
+
+class AC4DSIParser: public AC4Parser {
+public:
+ explicit AC4DSIParser(ABitReader &br);
+ virtual ~AC4DSIParser() { }
+
+ bool parse();
+
+private:
+ bool parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID);
+ bool parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID);
+ bool parseLanguageTag(uint32_t presentationID, uint32_t substreamID);
+ bool parseBitrateDsi();
+
+ uint64_t mDSISize;
+ ABitReader& mBitReader;
+};
+
+};
+
+#endif // AC4_PARSER_H_
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index fa739e8..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -2,6 +2,7 @@
name: "libmp4extractor_defaults",
srcs: [
+ "AC4Parser.cpp",
"ItemTable.cpp",
"MPEG4Extractor.cpp",
"SampleIterator.cpp",
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index ca9deab..be442e6 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -1529,12 +1529,16 @@
if (thumbItemIndex >= 0) {
const ImageItem &thumbnail = mItemIdToItemMap[thumbItemIndex];
- meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
- meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
- meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
- thumbnail.hvcc->data(), thumbnail.hvcc->size());
- ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
- imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+ if (thumbnail.hvcc != NULL) {
+ meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
+ meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
+ meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
+ thumbnail.hvcc->data(), thumbnail.hvcc->size());
+ ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
+ imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+ } else {
+ ALOGW("%s: thumbnail data is missing for image[%u]!", __FUNCTION__, imageIndex);
+ }
} else {
ALOGW("%s: Referenced thumbnail does not exist!", __FUNCTION__);
}
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 7b3b81d..fe9f99c 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -26,6 +26,7 @@
#include <utils/Log.h>
+#include "AC4Parser.h"
#include "MPEG4Extractor.h"
#include "SampleTable.h"
#include "ItemTable.h"
@@ -125,6 +126,8 @@
bool mIsAVC;
bool mIsHEVC;
+ bool mIsAC4;
+
size_t mNALLengthSize;
bool mStarted;
@@ -310,6 +313,9 @@
case FOURCC('s', 'a', 'w', 'b'):
return MEDIA_MIMETYPE_AUDIO_AMR_WB;
+ case FOURCC('e', 'c', '-', '3'):
+ return MEDIA_MIMETYPE_AUDIO_EAC3;
+
case FOURCC('m', 'p', '4', 'v'):
return MEDIA_MIMETYPE_VIDEO_MPEG4;
@@ -324,6 +330,8 @@
case FOURCC('h', 'v', 'c', '1'):
case FOURCC('h', 'e', 'v', '1'):
return MEDIA_MIMETYPE_VIDEO_HEVC;
+ case FOURCC('a', 'c', '-', '4'):
+ return MEDIA_MIMETYPE_AUDIO_AC4;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -2433,7 +2441,19 @@
case FOURCC('a', 'c', '-', '3'):
{
*offset += chunk_size;
- return parseAC3SampleEntry(data_offset);
+ return parseAC3SpecificBox(data_offset);
+ }
+
+ case FOURCC('e', 'c', '-', '3'):
+ {
+ *offset += chunk_size;
+ return parseEAC3SpecificBox(data_offset);
+ }
+
+ case FOURCC('a', 'c', '-', '4'):
+ {
+ *offset += chunk_size;
+ return parseAC4SpecificBox(data_offset);
}
case FOURCC('f', 't', 'y', 'p'):
@@ -2507,36 +2527,260 @@
return OK;
}
-status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+status_t MPEG4Extractor::parseChannelCountSampleRate(
+ off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate) {
// skip 16 bytes:
// + 6-byte reserved,
// + 2-byte data reference index,
// + 8-byte reserved
- offset += 16;
- uint16_t channelCount;
- if (!mDataSource->getUInt16(offset, &channelCount)) {
+ *offset += 16;
+ if (!mDataSource->getUInt16(*offset, channelCount)) {
+ ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read channel count");
return ERROR_MALFORMED;
}
// skip 8 bytes:
// + 2-byte channelCount,
// + 2-byte sample size,
// + 4-byte reserved
- offset += 8;
- uint16_t sampleRate;
- if (!mDataSource->getUInt16(offset, &sampleRate)) {
- ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+ *offset += 8;
+ if (!mDataSource->getUInt16(*offset, sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading sample entry box: cannot read sample rate");
return ERROR_MALFORMED;
}
-
// skip 4 bytes:
// + 2-byte sampleRate,
// + 2-byte reserved
- offset += 4;
- return parseAC3SpecificBox(offset, sampleRate);
+ *offset += 4;
+ return OK;
}
-status_t MPEG4Extractor::parseAC3SpecificBox(
- off64_t offset, uint16_t sampleRate) {
+status_t MPEG4Extractor::parseAC4SpecificBox(off64_t offset) {
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint16_t sampleRate, channelCount;
+ status_t status;
+ if ((status = parseChannelCountSampleRate(&offset, &channelCount, &sampleRate)) != OK) {
+ return status;
+ }
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC4MinimumBoxSize = 4 + 4 + 3;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC4MinimumBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte size
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte type
+ offset += 4;
+ // at least for AC4 DSI v1 this is big enough
+ const uint32_t kAC4SpecificBoxPayloadSize = 256;
+ uint8_t chunk[kAC4SpecificBoxPayloadSize];
+ ssize_t dsiSize = size - 8; // size of box - size and type fields
+ if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
+ mDataSource->readAt(offset, chunk, dsiSize) != dsiSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+ // + size-byte payload
+ offset += dsiSize;
+ ABitReader br(chunk, dsiSize);
+ AC4DSIParser parser(br);
+ if (!parser.parse()){
+ ALOGE("MPEG4Extractor: error while parsing ac-4 specific block");
+ return ERROR_MALFORMED;
+ }
+
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+ return OK;
+}
+
+status_t MPEG4Extractor::parseEAC3SpecificBox(off64_t offset) {
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint16_t sampleRate, channels;
+ status_t status;
+ if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+ return status;
+ }
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kEAC3SpecificBoxMinSize = 11;
+ // 13 + 3 + (8 * (2 + 5 + 5 + 3 + 1 + 3 + 4 + (14 * 9 + 1))) bits == 152 bytes theoretical max
+ // calculated from the required bits read below as well as the maximum number of independent
+ // and dependent substreams you can have
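+ // i.e. 16 fixed header bits + 8 independent substreams * 150 bits each = 1216 bits = 152 bytes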
+ const uint32_t kEAC3SpecificBoxMaxSize = 152;
+ if (!mDataSource->getUInt32(offset, &size) ||
+ size < kEAC3SpecificBoxMinSize ||
+ size > kEAC3SpecificBoxMaxSize) {
+ ALOGE("MPEG4Extractor: error while reading eac-3 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'e', 'c', '3')) {
+ ALOGE("MPEG4Extractor: error while reading eac-3 specific block: header not dec3");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ uint8_t* chunk = new (std::nothrow) uint8_t[size];
+ if (chunk == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(offset, chunk, size) != (ssize_t)size) {
+ ALOGE("MPEG4Extractor: error while reading eac-3 specific block: bitstream fields");
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+
+ ABitReader br(chunk, size);
+ static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+ static const unsigned sampleRateTable[] = {48000, 44100, 32000};
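+ // channelCountTable is indexed by acmod (audio coding mode) and sampleRateTable by fscod,
+ // per the (E)AC-3 bitstream definitions; fscod == 3 is reserved and rejected below.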
+
+ if (br.numBitsLeft() < 16) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ unsigned data_rate = br.getBits(13);
+ ALOGV("EAC3 data rate = %d", data_rate);
+
+ unsigned num_ind_sub = br.getBits(3) + 1;
+ ALOGV("EAC3 independant substreams = %d", num_ind_sub);
+ if (br.numBitsLeft() < (num_ind_sub * 23)) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+
+ unsigned channelCount = 0;
+ for (unsigned i = 0; i < num_ind_sub; i++) {
+ unsigned fscod = br.getBits(2);
+ if (fscod == 3) {
+ ALOGE("Incorrect fscod (3) in EAC3 header");
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ unsigned boxSampleRate = sampleRateTable[fscod];
+ if (boxSampleRate != sampleRate) {
+ ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+ boxSampleRate, sampleRate);
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+
+ unsigned bsid = br.getBits(5);
+ if (bsid < 8) {
+ ALOGW("Incorrect bsid in EAC3 header. Possibly AC-3?");
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+
+ // skip
+ br.skipBits(2);
+ unsigned bsmod = br.getBits(3);
+ unsigned acmod = br.getBits(3);
+ unsigned lfeon = br.getBits(1);
+ // we currently only support the first stream
+ if (i == 0)
+ channelCount = channelCountTable[acmod] + lfeon;
+ ALOGV("bsmod = %d, acmod = %d, lfeon = %d", bsmod, acmod, lfeon);
+
+ br.skipBits(3);
+ unsigned num_dep_sub = br.getBits(4);
+ ALOGV("EAC3 dependant substreams = %d", num_dep_sub);
+ if (num_dep_sub != 0) {
+ if (br.numBitsLeft() < 9) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ static const char* chan_loc_tbl[] = { "Lc/Rc","Lrs/Rrs","Cs","Ts","Lsd/Rsd",
+ "Lw/Rw","Lvh/Rvh","Cvh","Lfe2" };
+ unsigned chan_loc = br.getBits(9);
+ unsigned mask = 1;
+ for (unsigned j = 0; j < 9; j++, mask <<= 1) {
+ if ((chan_loc & mask) != 0) {
+ // we currently only support the first stream
+ if (i == 0) {
+ channelCount++;
+ // these are 2 channels in the mask
+ if (j == 0 || j == 1 || j == 4 || j == 5 || j == 6) {
+ channelCount++;
+ }
+ }
+ ALOGV(" %s", chan_loc_tbl[j]);
+ }
+ }
+ } else {
+ if (br.numBitsLeft() == 0) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ br.skipBits(1);
+ }
+ }
+
+ if (br.numBitsLeft() != 0) {
+ if (br.numBitsLeft() < 8) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ unsigned mask = br.getBits(8);
+ for (unsigned i = 0; i < 8; i++) {
+ if ((mask & (0x1 << i)) == 0)
+ continue;
+
+ if (br.numBitsLeft() < 8) {
+ delete[] chunk;
+ return ERROR_MALFORMED;
+ }
+ switch (i) {
+ case 0: {
+ unsigned complexity = br.getBits(8);
+ ALOGV("Found a JOC stream with complexity = %d", complexity);
+ }break;
+ default: {
+ br.skipBits(8);
+ }break;
+ }
+ }
+ }
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+ mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+
+ delete[] chunk;
+ return OK;
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(off64_t offset) {
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint16_t sampleRate, channels;
+ status_t status;
+ if ((status = parseChannelCountSampleRate(&offset, &channels, &sampleRate)) != OK) {
+ return status;
+ }
uint32_t size;
// + 4-byte size
// + 4-byte type
@@ -2591,9 +2835,6 @@
unsigned lfeon = br.getBits(1);
unsigned channelCount = channelCountTable[acmod] + lfeon;
- if (mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
@@ -3857,6 +4098,7 @@
mCurrentSampleInfoOffsets(NULL),
mIsAVC(false),
mIsHEVC(false),
+ mIsAC4(false),
mNALLengthSize(0),
mStarted(false),
mGroup(NULL),
@@ -3890,6 +4132,7 @@
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+ mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
if (mIsAVC) {
uint32_t type;
@@ -4830,7 +5073,7 @@
}
}
- if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+ if ((!mIsAVC && !mIsHEVC && !mIsAC4) || mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read =
mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -4862,13 +5105,20 @@
++mCurrentSampleIndex;
}
- if (!mIsAVC && !mIsHEVC) {
+ if (!mIsAVC && !mIsHEVC && !mIsAC4) {
*out = mBuffer;
mBuffer = NULL;
return OK;
}
+ if (mIsAC4) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
// Each NAL unit is split up into its constituent fragments and
// each one of them returned in its own buffer.
@@ -4907,6 +5157,58 @@
*out = clone;
return OK;
+ } else if (mIsAC4) {
+ CHECK(mBuffer != NULL);
+ // Make sure there is enough space to write the sync header and the raw frame
+ if (mBuffer->range_length() < (7 + size)) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ uint8_t *dstData = (uint8_t *)mBuffer->data();
+ size_t dstOffset = 0;
+ // Add AC-4 sync header to MPEG4 encapsulated AC-4 raw frame
+ // AC40 sync word, meaning no CRC at the end of the frame
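+ // The 16-bit frame_size field is set to the 0xFFFF escape value, so the real size
+ // follows as a 24-bit big-endian value; e.g. a 0x012345-byte raw frame yields the
+ // 7-byte header AC 40 FF FF 01 23 45.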
+ dstData[dstOffset++] = 0xAC;
+ dstData[dstOffset++] = 0x40;
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = (uint8_t)((size >> 16) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 8) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 0) & 0xFF);
+
+ ssize_t numBytesRead = mDataSource->readAt(offset, dstData + dstOffset, size);
+ if (numBytesRead != (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ mBuffer->set_range(0, dstOffset + size);
+ mBuffer->meta_data().clear();
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
} else {
// Whole NAL units are returned but each fragment is prefixed by
// the start code (0x00 00 00 01).
@@ -5081,9 +5383,13 @@
uint32_t cts = 0;
bool isSyncSample = false;
bool newBuffer = false;
- if (mBuffer == NULL) {
+ if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
newBuffer = true;
+ if (mBuffer != NULL) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
if (mCurrentSampleIndex >= mCurrentSamples.size()) {
// move to next fragment if there is one
if (mNextMoofOffset <= mCurrentMoofOffset) {
@@ -5361,6 +5667,8 @@
return OK;
}
+
+ return OK;
}
MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 3ea0963..a4a5684 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -139,8 +139,11 @@
Track *findTrackByMimePrefix(const char *mimePrefix);
- status_t parseAC3SampleEntry(off64_t offset);
- status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+ status_t parseChannelCountSampleRate(
+ off64_t *offset, uint16_t *channelCount, uint16_t *sampleRate);
+ status_t parseAC3SpecificBox(off64_t offset);
+ status_t parseEAC3SpecificBox(off64_t offset);
+ status_t parseAC4SpecificBox(off64_t offset);
MPEG4Extractor(const MPEG4Extractor &);
MPEG4Extractor &operator=(const MPEG4Extractor &);
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 93ee7c6..1a6d306 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -328,7 +328,15 @@
++mTimeToSampleIndex;
}
- *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+ // below is equivalent to:
+ // *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
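+ // The __builtin_{sub,mul,add}_overflow intrinsics return true when the result does not
+ // fit in uint32_t, so an overflow in any step reports ERROR_OUT_OF_RANGE instead of
+ // silently wrapping.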
+ uint32_t tmp;
+ if (__builtin_sub_overflow(sampleIndex, mTTSSampleIndex, &tmp) ||
+ __builtin_mul_overflow(mTTSDuration, tmp, &tmp) ||
+ __builtin_add_overflow(mTTSSampleTime, tmp, &tmp)) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ *time = tmp;
int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
if ((offset < 0 && *time < (offset == INT32_MIN ?
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
deleted file mode 100644
index a0b981c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/input_monitor.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := input_monitor_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
deleted file mode 100644
index e74475c..0000000
--- a/media/libaaudio/examples/input_monitor/jni/Application.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-# TODO remove then when we support other architectures
-APP_ABI := arm64-v8a
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
deleted file mode 100644
index aebe877..0000000
--- a/media/libaaudio/examples/loopback/jni/Android.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/loopback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_STATIC_LIBRARIES := libsndfile
-LOCAL_SHARED_LIBRARIES := libaaudio libaudioutils
-LOCAL_MODULE := aaudio_loopback
-include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/jni/Application.mk b/media/libaaudio/examples/loopback/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/loopback/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 91ebf73..84f9c22 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -338,7 +338,7 @@
aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
int requestedInputChannelCount = NUM_INPUT_CHANNELS;
aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_UNSPECIFIED;
- int32_t requestedInputCapacity = -1;
+ int32_t requestedInputCapacity = AAUDIO_UNSPECIFIED;
aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
int32_t outputFramesPerBurst = 0;
@@ -459,15 +459,8 @@
argParser.setPerformanceMode(inputPerformanceLevel);
argParser.setChannelCount(requestedInputChannelCount);
argParser.setSharingMode(requestedInputSharingMode);
-
- // Make sure the input buffer has plenty of capacity.
- // Extra capacity on input should not increase latency if we keep it drained.
- int32_t inputBufferCapacity = requestedInputCapacity;
- if (inputBufferCapacity < 0) {
- int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
- inputBufferCapacity = 2 * outputBufferCapacity;
- }
- argParser.setBufferCapacity(inputBufferCapacity);
+ // Warning! If you change input capacity then you may not get a FAST track on Legacy path.
+ argParser.setBufferCapacity(requestedInputCapacity);
result = recorder.open(argParser);
if (result != AAUDIO_OK) {
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index 88d7401..0e61589 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -272,7 +272,9 @@
if (strlen(arg) > 2) {
policy = atoi(&arg[2]);
}
- AAudio_setMMapPolicy(policy);
+ if (!AAudio_setMMapPolicy(policy)) {
+ printf("ERROR: invalid MMAP policy mode %i\n", policy);
+ }
} break;
case 'n':
setNumberOfBursts(atoi(&arg[2]));
@@ -363,7 +365,7 @@
mode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
break;
default:
- printf("ERROR invalid performance mode %c\n", c);
+ printf("ERROR: invalid performance mode %c\n", c);
break;
}
return mode;
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
deleted file mode 100644
index 1a1bd43..0000000
--- a/media/libaaudio/examples/write_sine/jni/Android.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/examples/utils
-
-# NDK recommends using this kind of relative path instead of an absolute path.
-LOCAL_SRC_FILES:= ../src/write_sine.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE_TAGS := tests
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/examples/utils
-
-LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := write_sine_callback
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := libaaudio_prebuilt
-LOCAL_SRC_FILES := libaaudio.so
-LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
-include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
deleted file mode 100644
index ba44f37..0000000
--- a/media/libaaudio/examples/write_sine/jni/Application.mk
+++ /dev/null
@@ -1 +0,0 @@
-APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 3a7a578..4ef765d 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -150,6 +150,11 @@
allowMMap = false;
}
+ if (!allowMMap && !allowLegacy) {
+ ALOGE("%s() no backend available: neither MMAP nor legacy path are allowed", __func__);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
result = builder_createStream(getDirection(), sharingMode, allowMMap, &audioStream);
if (result == AAUDIO_OK) {
// Open the stream using the parameters from the builder.
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 2df37a8..6146c0e 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -49,6 +49,7 @@
"libaudiomanager",
"libmedia_helper",
"libmediametrics",
+ "libmediautils",
],
export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index b40f0db..da7d85e 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -432,14 +432,15 @@
}
status_t AudioEffect::getEffectDescriptor(const effect_uuid_t *uuid,
- effect_descriptor_t *descriptor) /*const*/
+ const effect_uuid_t *type,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t *descriptor)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- return af->getEffectDescriptor(uuid, descriptor);
+ return af->getEffectDescriptor(uuid, type, preferredTypeFlag, descriptor);
}
-
status_t AudioEffect::queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
@@ -448,6 +449,55 @@
if (aps == 0) return PERMISSION_DENIED;
return aps->queryDefaultPreProcessing(audioSession, descriptors, count);
}
+
+status_t AudioEffect::newEffectUniqueId(audio_unique_id_t* id)
+{
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ *id = af->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+ return NO_ERROR;
+}
+
+status_t AudioEffect::addStreamDefaultEffect(const char *typeStr,
+ const String16& opPackageName,
+ const char *uuidStr,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t *id)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
+
+ // Convert type & uuid from string to effect_uuid_t.
+ effect_uuid_t type;
+ if (typeStr != NULL) {
+ status_t res = stringToGuid(typeStr, &type);
+ if (res != OK) return res;
+ } else {
+ type = *EFFECT_UUID_NULL;
+ }
+
+ effect_uuid_t uuid;
+ if (uuidStr != NULL) {
+ status_t res = stringToGuid(uuidStr, &uuid);
+ if (res != OK) return res;
+ } else {
+ uuid = *EFFECT_UUID_NULL;
+ }
+
+ return aps->addStreamDefaultEffect(&type, opPackageName, &uuid, priority, usage, id);
+}
+
+status_t AudioEffect::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ return aps->removeStreamDefaultEffect(id);
+}
+
// -------------------------------------------------------------------------
status_t AudioEffect::stringToGuid(const char *str, effect_uuid_t *guid)
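
For illustration, the type-based lookup that getEffectDescriptor() gains above could be driven roughly as follows; the reverb type string and the preference flag are assumptions for the sketch, not values taken from this change (assumes <media/AudioEffect.h>):

    effect_descriptor_t desc = {};
    effect_uuid_t reverbType;   // assumed: an environmental-reverb effect type
    if (AudioEffect::stringToGuid("c2e5d5f0-94bd-4763-9cac-4e234d06839e", &reverbType) == OK) {
        // Passing EFFECT_UUID_NULL as the implementation uuid selects the type/flag search path.
        status_t st = AudioEffect::getEffectDescriptor(
                EFFECT_UUID_NULL, &reverbType, EFFECT_FLAG_TYPE_INSERT, &desc);
    }
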
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index c072901..e260fd8 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -878,31 +878,25 @@
flags, selectedDeviceId, portId);
}
-status_t AudioSystem::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioSystem::startOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->startOutput(output, stream, session);
+ return aps->startOutput(portId);
}
-status_t AudioSystem::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioSystem::stopOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->stopOutput(output, stream, session);
+ return aps->stopOutput(portId);
}
-void AudioSystem::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioSystem::releaseOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return;
- aps->releaseOutput(output, stream, session);
+ aps->releaseOutput(portId);
}
status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
@@ -1244,18 +1238,18 @@
status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle)
+ audio_port_handle_t *portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->startAudioSource(source, attributes, handle);
+ return aps->startAudioSource(source, attributes, portId);
}
-status_t AudioSystem::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioSystem::stopAudioSource(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->stopAudioSource(handle);
+ return aps->stopAudioSource(portId);
}
status_t AudioSystem::setMasterMono(bool mono)
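
Sketched with assumed surrounding code, the client-side sequence now carries only the port handle that getOutputForAttr() returns (assumes <media/AudioSystem.h>):

    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    // ... AudioSystem::getOutputForAttr(..., &portId) fills in the handle as before ...
    if (AudioSystem::startOutput(portId) == NO_ERROR) {
        // ... playback ...
        AudioSystem::stopOutput(portId);
    }
    AudioSystem::releaseOutput(portId);
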
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index dced3c4..a018b22 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -1231,6 +1231,21 @@
return 0;
}
+__attribute__((no_sanitize("integer")))
+size_t AudioRecordServerProxy::framesReadySafe() const
+{
+ if (mIsShutdown) {
+ return 0;
+ }
+ const int32_t front = android_atomic_acquire_load(&mCblk->u.mStreaming.mFront);
+ const int32_t rear = mCblk->u.mStreaming.mRear;
+ const ssize_t filled = rear - front;
+ if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+ return 0; // error condition, silently return 0.
+ }
+ return filled;
+}
+
// ---------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 00af7e8..00678c2 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,10 +24,8 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <cutils/multiuser.h>
#include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
-
+#include <mediautils/ServiceUtilities.h>
#include "IAudioFlinger.h"
namespace android {
@@ -600,14 +598,18 @@
}
virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
- effect_descriptor_t *pDescriptor) const
+ const effect_uuid_t *pType,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t *pDescriptor) const
{
- if (pUuid == NULL || pDescriptor == NULL) {
+ if (pUuid == NULL || pType == NULL || pDescriptor == NULL) {
return BAD_VALUE;
}
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.write(pUuid, sizeof(effect_uuid_t));
+ data.write(pType, sizeof(effect_uuid_t));
+ data.writeUint32(preferredTypeFlag);
status_t status = remote()->transact(GET_EFFECT_DESCRIPTOR, data, &reply);
if (status != NO_ERROR) {
return status;
@@ -636,10 +638,10 @@
sp<IEffect> effect;
if (pDesc == NULL) {
- return effect;
if (status != NULL) {
*status = BAD_VALUE;
}
+ return effect;
}
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -912,7 +914,7 @@
case SET_MIC_MUTE:
case SET_LOW_RAM_DEVICE:
case SYSTEM_READY: {
- if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
@@ -951,7 +953,8 @@
break;
}
- TimeCheck check("IAudioFlinger");
+ std::string tag("IAudioFlinger command " + std::to_string(code));
+ TimeCheck check(tag.c_str());
switch (code) {
case CREATE_TRACK: {
@@ -1278,8 +1281,11 @@
CHECK_INTERFACE(IAudioFlinger, data, reply);
effect_uuid_t uuid;
data.read(&uuid, sizeof(effect_uuid_t));
+ effect_uuid_t type;
+ data.read(&type, sizeof(effect_uuid_t));
+ uint32_t preferredTypeFlag = data.readUint32();
effect_descriptor_t desc = {};
- status_t status = getEffectDescriptor(&uuid, &desc);
+ status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&desc, sizeof(effect_descriptor_t));
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a1236e7..abf74f8 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -24,11 +24,10 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <cutils/multiuser.h>
#include <media/AudioEffect.h>
#include <media/IAudioPolicyService.h>
#include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
namespace android {
@@ -82,7 +81,9 @@
GET_MASTER_MONO,
GET_STREAM_VOLUME_DB,
GET_SURROUND_FORMATS,
- SET_SURROUND_FORMAT_ENABLED
+ SET_SURROUND_FORMAT_ENABLED,
+ ADD_STREAM_DEFAULT_EFFECT,
+ REMOVE_STREAM_DEFAULT_EFFECT
};
#define MAX_ITEMS_PER_LIST 1024
@@ -245,41 +246,29 @@
return status;
}
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual status_t startOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) session);
+ data.writeInt32((int32_t)portId);
remote()->transact(START_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual status_t stopOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) session);
+ data.writeInt32((int32_t)portId);
remote()->transact(STOP_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual void releaseOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t)stream);
- data.writeInt32((int32_t)session);
+ data.writeInt32((int32_t)portId);
remote()->transact(RELEASE_OUTPUT, data, &reply);
}
@@ -753,11 +742,11 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle)
+ audio_port_handle_t *portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- if (source == NULL || attributes == NULL || handle == NULL) {
+ if (source == NULL || attributes == NULL || portId == NULL) {
return BAD_VALUE;
}
data.write(source, sizeof(struct audio_port_config));
@@ -770,15 +759,15 @@
if (status != NO_ERROR) {
return status;
}
- *handle = (audio_patch_handle_t)reply.readInt32();
+ *portId = (audio_port_handle_t)reply.readInt32();
return status;
}
- virtual status_t stopAudioSource(audio_patch_handle_t handle)
+ virtual status_t stopAudioSource(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(handle);
+ data.writeInt32(portId);
status_t status = remote()->transact(STOP_AUDIO_SOURCE, data, &reply);
if (status != NO_ERROR) {
return status;
@@ -879,6 +868,42 @@
}
return reply.readInt32();
}
+
+ virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(type, sizeof(effect_uuid_t));
+ data.writeString16(opPackageName);
+ data.write(uuid, sizeof(effect_uuid_t));
+ data.writeInt32(priority);
+ data.writeInt32((int32_t) usage);
+ status_t status = remote()->transact(ADD_STREAM_DEFAULT_EFFECT, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast <status_t> (reply.readInt32());
+ *id = reply.readInt32();
+ return status;
+ }
+
+ virtual status_t removeStreamDefaultEffect(audio_unique_id_t id)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(id);
+ status_t status = remote()->transact(REMOVE_STREAM_DEFAULT_EFFECT, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast <status_t> (reply.readInt32());
+ }
+
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -936,7 +961,7 @@
case STOP_AUDIO_SOURCE:
case GET_SURROUND_FORMATS:
case SET_SURROUND_FORMAT_ENABLED: {
- if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
@@ -948,7 +973,8 @@
break;
}
- TimeCheck check("IAudioPolicyService");
+ std::string tag("IAudioPolicyService command " + std::to_string(code));
+ TimeCheck check(tag.c_str());
switch (code) {
case SET_DEVICE_CONNECTION_STATE: {
@@ -1075,34 +1101,22 @@
case START_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream =
- static_cast <audio_stream_type_t>(data.readInt32());
- audio_session_t session = (audio_session_t)data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(startOutput(output,
- stream,
- session)));
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(startOutput(portId)));
return NO_ERROR;
} break;
case STOP_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream =
- static_cast <audio_stream_type_t>(data.readInt32());
- audio_session_t session = (audio_session_t)data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
- stream,
- session)));
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(stopOutput(portId)));
return NO_ERROR;
} break;
case RELEASE_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream = (audio_stream_type_t)data.readInt32();
- audio_session_t session = (audio_session_t)data.readInt32();
- releaseOutput(output, stream, session);
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ releaseOutput(portId);
return NO_ERROR;
} break;
@@ -1496,17 +1510,17 @@
audio_attributes_t attributes = {};
data.read(&attributes, sizeof(audio_attributes_t));
sanetizeAudioAttributes(&attributes);
- audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- status_t status = startAudioSource(&source, &attributes, &handle);
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ status_t status = startAudioSource(&source, &attributes, &portId);
reply->writeInt32(status);
- reply->writeInt32(handle);
+ reply->writeInt32(portId);
return NO_ERROR;
} break;
case STOP_AUDIO_SOURCE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_patch_handle_t handle = (audio_patch_handle_t) data.readInt32();
- status_t status = stopAudioSource(handle);
+ audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
+ status_t status = stopAudioSource(portId);
reply->writeInt32(status);
return NO_ERROR;
} break;
@@ -1585,6 +1599,43 @@
return NO_ERROR;
}
+ case ADD_STREAM_DEFAULT_EFFECT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ effect_uuid_t type;
+ status_t status = data.read(&type, sizeof(effect_uuid_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ String16 opPackageName;
+ status = data.readString16(&opPackageName);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ effect_uuid_t uuid;
+ status = data.read(&uuid, sizeof(effect_uuid_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ int32_t priority = data.readInt32();
+ audio_usage_t usage = (audio_usage_t) data.readInt32();
+ audio_unique_id_t id = 0;
+ reply->writeInt32(static_cast <int32_t>(addStreamDefaultEffect(&type,
+ opPackageName,
+ &uuid,
+ priority,
+ usage,
+ &id)));
+ reply->writeInt32(id);
+ return NO_ERROR;
+ }
+
+ case REMOVE_STREAM_DEFAULT_EFFECT: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_unique_id_t id = static_cast<audio_unique_id_t>(data.readInt32());
+ reply->writeInt32(static_cast <int32_t>(removeStreamDefaultEffect(id)));
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 324bcb9..58a9baa 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -90,27 +90,34 @@
*/
static status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
-
/*
- * Returns the descriptor for the specified effect uuid.
+ * Returns a descriptor for the specified effect uuid or type.
+ *
+ * Lookup an effect by uuid, or if that's unspecified (EFFECT_UUID_NULL),
+ * do so by type and preferred flags instead.
*
* Parameters:
* uuid: pointer to effect uuid.
+ * type: pointer to effect type uuid.
+ * preferredTypeFlag: if multiple effects of the given type exist,
+ * one with a matching type flag will be chosen over one without.
+ * Use EFFECT_FLAG_TYPE_MASK to indicate no preference.
* descriptor: address where the effect descriptor should be returned.
*
* Returned status (from utils/Errors.h) can be:
* NO_ERROR successful operation.
* PERMISSION_DENIED could not get AudioFlinger interface
* NO_INIT effect library failed to initialize
- * BAD_VALUE invalid uuid or descriptor pointers
+ * BAD_VALUE invalid type or descriptor pointers
* NAME_NOT_FOUND no effect with this uuid found
*
* Returned value
* *descriptor updated with effect descriptor
*/
static status_t getEffectDescriptor(const effect_uuid_t *uuid,
- effect_descriptor_t *descriptor) /*const*/;
-
+ const effect_uuid_t *type,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t *descriptor);
/*
* Returns a list of descriptors corresponding to the pre processings enabled by default
@@ -144,6 +151,79 @@
uint32_t *count);
/*
+ * Gets a new system-wide unique effect id.
+ *
+ * Parameters:
+ * id: The address to return the generated id.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * or caller lacks required permissions.
+ * Returned value
+ * *id: The new unique system-wide effect id.
+ */
+ static status_t newEffectUniqueId(audio_unique_id_t* id);
+
+ /*
+ * Static methods for adding/removing system-wide effects.
+ */
+
+ /*
+ * Adds an effect to the list of default output effects for a given stream type.
+ *
+ * If the effect is no longer available when a stream of the given type
+ * is created, the system will continue without adding it.
+ *
+ * Parameters:
+ * typeStr: Type uuid of effect to be a default: can be null if uuidStr is specified.
+ * This may correspond to the OpenSL ES interface implemented by this effect,
+ * or could be some vendor-defined type.
+ * opPackageName: The package name used for app op checks.
+ * uuidStr: Uuid of effect to be a default: can be null if type is specified.
+ * This uuid corresponds to a particular implementation of an effect type.
+ * Note if both uuidStr and typeStr are specified, typeStr is ignored.
+ * priority: Requested priority for effect control: the priority level corresponds to the
+ * value of priority parameter: negative values indicate lower priorities, positive
+ * values higher priorities, 0 being the normal priority.
+ * usage: The usage this effect should be a default for. Unrecognized values will be
+ * treated as AUDIO_USAGE_UNKNOWN.
+ * id: Address where the system-wide unique id of the default effect should be returned.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * or caller lacks required permissions.
+ * NO_INIT effect library failed to initialize.
+ * BAD_VALUE invalid type uuid or implementation uuid.
+ * NAME_NOT_FOUND no effect with this uuid or type found.
+ *
+ * Returned value
+ * *id: The system-wide unique id of the added default effect.
+ */
+ static status_t addStreamDefaultEffect(const char* typeStr,
+ const String16& opPackageName,
+ const char* uuidStr,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id);
+
+ /*
+ * Removes an effect from the list of default output effects for a given stream type.
+ *
+ * Parameters:
+ * id: The system-wide unique id of the effect that should no longer be a default.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * or caller lacks required permissions.
+ * NO_INIT effect library failed to initialize.
+ * BAD_VALUE invalid id.
+ */
+ static status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
+ /*
* Events used by callback function (effect_callback_t).
*/
enum event_type {
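
A rough usage sketch for the two new static methods; the package name and type uuid below are placeholders, not values taken from this patch (assumes <media/AudioEffect.h>):

    audio_unique_id_t effectId = 0;
    status_t st = AudioEffect::addStreamDefaultEffect(
            "c2e5d5f0-94bd-4763-9cac-4e234d06839e",  // typeStr: assumed reverb type
            String16("com.example.audiosetup"),      // opPackageName: placeholder
            nullptr,                                 // uuidStr: any implementation of the type
            0,                                       // priority: normal
            AUDIO_USAGE_MEDIA,
            &effectId);
    if (st == NO_ERROR) {
        // ... later, when the default should no longer apply ...
        AudioEffect::removeStreamDefaultEffect(effectId);
    }
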
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 967d895..24837e3 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -64,6 +64,9 @@
static const char * const keyPresentationId;
static const char * const keyProgramId;
+ // keyAudioLanguagePreferred: Preferred audio language
+ static const char * const keyAudioLanguagePreferred;
+
// keyStreamConnect / Disconnect: value is an int in audio_devices_t
static const char * const keyStreamConnect;
static const char * const keyStreamDisconnect;
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
index 73ee0a7..35d2e85 100644
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -19,6 +19,43 @@
#include <system/audio.h>
static inline
+audio_stream_type_t audio_usage_to_stream_type(const audio_usage_t usage)
+{
+ switch(usage) {
+ case AUDIO_USAGE_MEDIA:
+ case AUDIO_USAGE_GAME:
+ case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+ case AUDIO_USAGE_ASSISTANT:
+ return AUDIO_STREAM_MUSIC;
+ case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ return AUDIO_STREAM_ACCESSIBILITY;
+ case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+ return AUDIO_STREAM_SYSTEM;
+ case AUDIO_USAGE_VOICE_COMMUNICATION:
+ return AUDIO_STREAM_VOICE_CALL;
+
+ case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+ return AUDIO_STREAM_DTMF;
+
+ case AUDIO_USAGE_ALARM:
+ return AUDIO_STREAM_ALARM;
+ case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+ return AUDIO_STREAM_RING;
+
+ case AUDIO_USAGE_NOTIFICATION:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+ case AUDIO_USAGE_NOTIFICATION_EVENT:
+ return AUDIO_STREAM_NOTIFICATION;
+
+ case AUDIO_USAGE_UNKNOWN:
+ default:
+ return AUDIO_STREAM_MUSIC;
+ }
+}
+
+static inline
audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
{
// flags to stream type mapping
@@ -30,38 +67,7 @@
}
// usage to stream type mapping
- switch (attr->usage) {
- case AUDIO_USAGE_MEDIA:
- case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- case AUDIO_USAGE_ASSISTANT:
- return AUDIO_STREAM_MUSIC;
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return AUDIO_STREAM_ACCESSIBILITY;
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return AUDIO_STREAM_SYSTEM;
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return AUDIO_STREAM_VOICE_CALL;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return AUDIO_STREAM_DTMF;
-
- case AUDIO_USAGE_ALARM:
- return AUDIO_STREAM_ALARM;
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return AUDIO_STREAM_RING;
-
- case AUDIO_USAGE_NOTIFICATION:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return AUDIO_STREAM_NOTIFICATION;
-
- case AUDIO_USAGE_UNKNOWN:
- default:
- return AUDIO_STREAM_MUSIC;
- }
+ return audio_usage_to_stream_type(attr->usage);
}
static inline
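
With the mapping factored out into audio_usage_to_stream_type(), callers can translate a usage directly; both results below follow from the switch above:

    audio_stream_type_t s1 = audio_usage_to_stream_type(AUDIO_USAGE_ALARM);        // AUDIO_STREAM_ALARM
    audio_stream_type_t s2 = audio_usage_to_stream_type(AUDIO_USAGE_NOTIFICATION); // AUDIO_STREAM_NOTIFICATION
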
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4c0f796..adfee8b 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -224,15 +224,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- static status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ static status_t startOutput(audio_port_handle_t portId);
+ static status_t stopOutput(audio_port_handle_t portId);
+ static void releaseOutput(audio_port_handle_t portId);
// Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
// or release it with releaseInput().
@@ -328,9 +322,9 @@
static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
static status_t startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_patch_handle_t *handle);
- static status_t stopAudioSource(audio_patch_handle_t handle);
+ const audio_attributes_t *attributes,
+ audio_port_handle_t *portId);
+ static status_t stopAudioSource(audio_port_handle_t portId);
static status_t setMasterMono(bool mono);
static status_t getMasterMono(bool *mono);
diff --git a/media/libaudioclient/include/media/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
index 498de8e..e5925dd 100644
--- a/media/libaudioclient/include/media/AudioTimestamp.h
+++ b/media/libaudioclient/include/media/AudioTimestamp.h
@@ -135,8 +135,23 @@
return INVALID_OPERATION;
}
+ double getOutputServerLatencyMs(uint32_t sampleRate) const {
+ return getLatencyMs(sampleRate, LOCATION_SERVER, LOCATION_KERNEL);
+ }
+
+ double getLatencyMs(uint32_t sampleRate, Location location1, Location location2) const {
+ if (sampleRate > 0 && mTimeNs[location1] > 0 && mTimeNs[location2] > 0) {
+ const int64_t frameDifference =
+ mPosition[location1] - mPosition[location2];
+ const int64_t timeDifferenceNs =
+ mTimeNs[location1] - mTimeNs[location2];
+ return ((double)frameDifference * 1e9 / sampleRate - timeDifferenceNs) * 1e-6;
+ }
+ return 0.;
+ }
+
// convert fields to a printable string
- std::string toString() {
+ std::string toString() const {
std::stringstream ss;
ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index e6bf72f..31326ab 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -428,7 +428,9 @@
virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
- effect_descriptor_t *pDescriptor) const = 0;
+ const effect_uuid_t *pTypeUUID,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t *pDescriptor) const = 0;
virtual sp<IEffect> createEffect(
effect_descriptor_t *pDesc,
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index c3876af..c2899f8 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -66,15 +66,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId) = 0;
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
+ virtual status_t startOutput(audio_port_handle_t portId) = 0;
+ virtual status_t stopOutput(audio_port_handle_t portId) = 0;
+ virtual void releaseOutput(audio_port_handle_t portId) = 0;
virtual status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
@@ -115,6 +109,13 @@
virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count) = 0;
+ virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id) = 0;
+ virtual status_t removeStreamDefaultEffect(audio_unique_id_t id) = 0;
// Check if offload is possible for given format, stream type, sample rate,
// bit rate, duration, video and streaming or offload property is enabled
virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
@@ -159,8 +160,8 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle) = 0;
- virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+ audio_port_handle_t *portId) = 0;
+ virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
virtual status_t setMasterMono(bool mono) = 0;
virtual status_t getMasterMono(bool *mono) = 0;
diff --git a/media/libaudiohal/2.0/Android.bp b/media/libaudiohal/2.0/Android.bp
deleted file mode 100644
index 574b435..0000000
--- a/media/libaudiohal/2.0/Android.bp
+++ /dev/null
@@ -1,54 +0,0 @@
-cc_library_shared {
- name: "libaudiohal@2.0",
-
- srcs: [
- "DeviceHalLocal.cpp",
- "DevicesFactoryHalHybrid.cpp",
- "DevicesFactoryHalLocal.cpp",
- "StreamHalLocal.cpp",
-
- "ConversionHelperHidl.cpp",
- "DeviceHalHidl.cpp",
- "DevicesFactoryHalHidl.cpp",
- "EffectBufferHalHidl.cpp",
- "EffectHalHidl.cpp",
- "EffectsFactoryHalHidl.cpp",
- "StreamHalHidl.cpp",
- ],
-
- export_include_dirs: ["."],
-
- cflags: [
- "-Wall",
- "-Werror",
- ],
- shared_libs: [
- "libaudiohal_deathhandler",
- "libaudioutils",
- "libcutils",
- "liblog",
- "libutils",
- "libhardware",
- "libbase",
- "libfmq",
- "libhwbinder",
- "libhidlbase",
- "libhidlmemory",
- "libhidltransport",
- "android.hardware.audio@2.0",
- "android.hardware.audio.common@2.0",
- "android.hardware.audio.common@2.0-util",
- "android.hardware.audio.effect@2.0",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
- "libmedia_helper",
- "libmediautils",
- ],
- header_libs: [
- "libaudiohal_headers"
- ],
-
- export_shared_lib_headers: [
- "libfmq",
- ],
-}
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.cpp b/media/libaudiohal/2.0/ConversionHelperHidl.cpp
deleted file mode 100644
index f60bf8b..0000000
--- a/media/libaudiohal/2.0/ConversionHelperHidl.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "HalHidl"
-#include <media/AudioParameter.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V2_0::Result;
-
-namespace android {
-
-// static
-status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
- AudioParameter halKeys(keys);
- if (halKeys.size() == 0) return BAD_VALUE;
- hidlKeys->resize(halKeys.size());
- //FIXME: keyStreamSupportedChannels and keyStreamSupportedSamplingRates come with a
- // "keyFormat=<value>" pair. We need to transform it into a single key string so that it is
- // carried over to the legacy HAL via HIDL.
- String8 value;
- bool keepFormatValue = halKeys.size() == 2 &&
- (halKeys.get(String8(AudioParameter::keyStreamSupportedChannels), value) == NO_ERROR ||
- halKeys.get(String8(AudioParameter::keyStreamSupportedSamplingRates), value) == NO_ERROR);
-
- for (size_t i = 0; i < halKeys.size(); ++i) {
- String8 key;
- status_t status = halKeys.getAt(i, key);
- if (status != OK) return status;
- if (keepFormatValue && key == AudioParameter::keyFormat) {
- AudioParameter formatParam;
- halKeys.getAt(i, key, value);
- formatParam.add(key, value);
- key = formatParam.toString();
- }
- (*hidlKeys)[i] = key.string();
- }
- return OK;
-}
-
-// static
-status_t ConversionHelperHidl::parametersFromHal(
- const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
- AudioParameter params(kvPairs);
- if (params.size() == 0) return BAD_VALUE;
- hidlParams->resize(params.size());
- for (size_t i = 0; i < params.size(); ++i) {
- String8 key, value;
- status_t status = params.getAt(i, key, value);
- if (status != OK) return status;
- (*hidlParams)[i].key = key.string();
- (*hidlParams)[i].value = value.string();
- }
- return OK;
-}
-
-// static
-void ConversionHelperHidl::parametersToHal(
- const hidl_vec<ParameterValue>& parameters, String8 *values) {
- AudioParameter params;
- for (size_t i = 0; i < parameters.size(); ++i) {
- params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
- }
- values->setTo(params.toString());
-}
-
-ConversionHelperHidl::ConversionHelperHidl(const char* className)
- : mClassName(className) {
-}
-
-// static
-status_t ConversionHelperHidl::analyzeResult(const Result& result) {
- switch (result) {
- case Result::OK: return OK;
- case Result::INVALID_ARGUMENTS: return BAD_VALUE;
- case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
- case Result::NOT_INITIALIZED: return NO_INIT;
- case Result::NOT_SUPPORTED: return INVALID_OPERATION;
- default: return NO_INIT;
- }
-}
-
-void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
- ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
deleted file mode 100644
index 5b99d70..0000000
--- a/media/libaudiohal/2.0/DeviceHalHidl.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-
-#define LOG_TAG "DeviceHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <cutils/native_handle.h>
-#include <hwbinder/IPCThreadState.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "HidlUtils.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioConfig;
-using ::android::hardware::audio::common::V2_0::AudioDevice;
-using ::android::hardware::audio::common::V2_0::AudioInputFlag;
-using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V2_0::AudioPort;
-using ::android::hardware::audio::common::V2_0::AudioPortConfig;
-using ::android::hardware::audio::common::V2_0::AudioMode;
-using ::android::hardware::audio::common::V2_0::AudioSource;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::V2_0::DeviceAddress;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-
-namespace {
-
-status_t deviceAddressFromHal(
- audio_devices_t device, const char* halAddress, DeviceAddress* address) {
- address->device = AudioDevice(device);
-
- if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
- return OK;
- }
- const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
- if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
- if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
- int status = sscanf(halAddress,
- "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
- &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
- &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
- return status == 6 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
- int status = sscanf(halAddress,
- "%hhu.%hhu.%hhu.%hhu",
- &address->address.ipv4[0], &address->address.ipv4[1],
- &address->address.ipv4[2], &address->address.ipv4[3]);
- return status == 4 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
- int status = sscanf(halAddress,
- "card=%d;device=%d",
- &address->address.alsa.card, &address->address.alsa.device);
- return status == 2 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
- if (halAddress != NULL) {
- address->busAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
- if (halAddress != NULL) {
- address->rSubmixAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- }
- return OK;
-}
-
-} // namespace
-
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
- : ConversionHelperHidl("Device"), mDevice(device),
- mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
-}
-
-DeviceHalHidl::~DeviceHalHidl() {
- if (mDevice != 0) {
- mDevice.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
-}
-
-status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
- // Obsolete.
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::initCheck() {
- if (mDevice == 0) return NO_INIT;
- return processReturn("initCheck", mDevice->initCheck());
-}
-
-status_t DeviceHalHidl::setVoiceVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
-}
-
-status_t DeviceHalHidl::setMasterVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
-}
-
-status_t DeviceHalHidl::getMasterVolume(float *volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
- [&](Result r, float v) {
- retval = r;
- if (retval == Result::OK) {
- *volume = v;
- }
- });
- return processReturn("getMasterVolume", ret, retval);
-}
-
-status_t DeviceHalHidl::setMode(audio_mode_t mode) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
-}
-
-status_t DeviceHalHidl::setMicMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMicMute", mDevice->setMicMute(state));
-}
-
-status_t DeviceHalHidl::getMicMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMicMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMicMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setMasterMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMasterMute", mDevice->setMasterMute(state));
-}
-
-status_t DeviceHalHidl::getMasterMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMasterMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMasterMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mDevice->setParameters(hidlParams));
-}
-
-status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (mDevice == 0) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mDevice->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t DeviceHalHidl::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- if (mDevice == 0) return NO_INIT;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval;
- Return<void> ret = mDevice->getInputBufferSize(
- hidlConfig,
- [&](Result r, uint64_t bufferSize) {
- retval = r;
- if (retval == Result::OK) {
- *size = static_cast<size_t>(bufferSize);
- }
- });
- return processReturn("getInputBufferSize", ret, retval);
-}
-
-status_t DeviceHalHidl::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openOutputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioOutputFlag(flags),
- [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *outStream = new StreamOutHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openOutputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openInputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioInputFlag(flags),
- AudioSource(source),
- [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *inStream = new StreamInHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openInputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
-}
-
-status_t DeviceHalHidl::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
- HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
- HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
- Result retval;
- Return<void> ret = mDevice->createAudioPatch(
- hidlSources, hidlSinks,
- [&](Result r, AudioPatchHandle hidlPatch) {
- retval = r;
- if (retval == Result::OK) {
- *patch = static_cast<audio_patch_handle_t>(hidlPatch);
- }
- });
- return processReturn("createAudioPatch", ret, retval);
-}
-
-status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
-}
-
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
- if (mDevice == 0) return NO_INIT;
- AudioPort hidlPort;
- HidlUtils::audioPortFromHal(*port, &hidlPort);
- Result retval;
- Return<void> ret = mDevice->getAudioPort(
- hidlPort,
- [&](Result r, const AudioPort& p) {
- retval = r;
- if (retval == Result::OK) {
- HidlUtils::audioPortToHal(p, port);
- }
- });
- return processReturn("getAudioPort", ret, retval);
-}
-
-status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
- if (mDevice == 0) return NO_INIT;
- AudioPortConfig hidlConfig;
- HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
- return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
-}
-
-status_t DeviceHalHidl::getMicrophones(
- std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
- if (mDevice == 0) return NO_INIT;
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::dump(int fd) {
- if (mDevice == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn("dump", ret);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
deleted file mode 100644
index ec3bf78..0000000
--- a/media/libaudiohal/2.0/DeviceHalLocal.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DeviceHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
- : mDev(dev) {
-}
-
-DeviceHalLocal::~DeviceHalLocal() {
- int status = audio_hw_device_close(mDev);
- ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
- mDev = 0;
-}
-
-status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
- if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
- *devices = mDev->get_supported_devices(mDev);
- return OK;
-}
-
-status_t DeviceHalLocal::initCheck() {
- return mDev->init_check(mDev);
-}
-
-status_t DeviceHalLocal::setVoiceVolume(float volume) {
- return mDev->set_voice_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMasterVolume(float volume) {
- if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
- return mDev->set_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::getMasterVolume(float *volume) {
- if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
- return mDev->get_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMode(audio_mode_t mode) {
- return mDev->set_mode(mDev, mode);
-}
-
-status_t DeviceHalLocal::setMicMute(bool state) {
- return mDev->set_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMicMute(bool *state) {
- return mDev->get_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setMasterMute(bool state) {
- if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
- return mDev->set_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMasterMute(bool *state) {
- if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
- return mDev->get_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
- return mDev->set_parameters(mDev, kvPairs.string());
-}
-
-status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mDev->get_parameters(mDev, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t DeviceHalLocal::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- *size = mDev->get_input_buffer_size(mDev, config);
- return OK;
-}
-
-status_t DeviceHalLocal::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- audio_stream_out_t *halStream;
- ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
- "srate: %d format %#x channels %x address %s",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address);
- int openResut = mDev->open_output_stream(
- mDev, handle, devices, flags, config, &halStream, address);
- if (openResut == OK) {
- *outStream = new StreamOutHalLocal(halStream, this);
- }
- ALOGV("open_output_stream status %d stream %p", openResut, halStream);
- return openResut;
-}
-
-status_t DeviceHalLocal::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- audio_stream_in_t *halStream;
- ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
- "srate: %d format %#x channels %x address %s source %d",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address, source);
- int openResult = mDev->open_input_stream(
- mDev, handle, devices, config, &halStream, flags, address, source);
- if (openResult == OK) {
- *inStream = new StreamInHalLocal(halStream, this);
- }
- ALOGV("open_input_stream status %d stream %p", openResult, inStream);
- return openResult;
-}
-
-status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
- *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
- return OK;
-}
-
-status_t DeviceHalLocal::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->create_audio_patch(
- mDev, num_sources, sources, num_sinks, sinks, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->release_audio_patch(mDev, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
- return mDev->get_audio_port(mDev, port);
-}
-
-status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
- return mDev->set_audio_port_config(mDev, config);
- else
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::getMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::dump(int fd) {
- return mDev->dump(mDev, fd);
-}
-
-void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
- mDev->close_output_stream(mDev, stream_out);
-}
-
-void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
- mDev->close_input_stream(mDev, stream_in);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index 5b33592..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- mDevicesFactory = IDevicesFactory::getService();
- if (mDevicesFactory != 0) {
- // It is assumed that DevicesFactory is owned by AudioFlinger
- // and thus have the same lifespan.
- mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
- } else {
- ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
- exit(1);
- }
- // The MSD factory is optional
- mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
- // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-// static
-status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
- if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
- *device = IDevicesFactory::Device::PRIMARY;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
- *device = IDevicesFactory::Device::A2DP;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
- *device = IDevicesFactory::Device::USB;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
- *device = IDevicesFactory::Device::R_SUBMIX;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
- *device = IDevicesFactory::Device::STUB;
- return OK;
- }
- ALOGE("Invalid device name %s", name);
- return BAD_VALUE;
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mDevicesFactory == 0) return NO_INIT;
- IDevicesFactory::Device hidlDevice;
- status_t status = nameFromHal(name, &hidlDevice);
- if (status != OK) return status;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevicesFactory->openDevice(
- hidlDevice,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
- else return NO_INIT;
- }
- return FAILED_TRANSACTION;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
deleted file mode 100644
index 13a9acd..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <string.h>
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "DevicesFactoryHalLocal.h"
-
-namespace android {
-
-static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
- const hw_module_t *mod;
- int rc;
-
- rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
- if (rc) {
- ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- goto out;
- }
- rc = audio_hw_device_open(mod, dev);
- if (rc) {
- ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- goto out;
- }
- if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
- ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
- rc = BAD_VALUE;
- audio_hw_device_close(*dev);
- goto out;
- }
- return OK;
-
-out:
- *dev = NULL;
- return rc;
-}
-
-status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- audio_hw_device_t *dev;
- status_t rc = load_audio_interface(name, &dev);
- if (rc == OK) {
- *device = new DeviceHalLocal(dev);
- }
- return rc;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp b/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
deleted file mode 100644
index 226a500..0000000
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <atomic>
-
-#define LOG_TAG "EffectBufferHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <hidlmemory/mapping.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-
-using ::android::hardware::Return;
-using ::android::hidl::allocator::V1_0::IAllocator;
-
-namespace android {
-
-// static
-uint64_t EffectBufferHalHidl::makeUniqueId() {
- static std::atomic<uint64_t> counter{1};
- return counter++;
-}
-
-status_t EffectBufferHalHidl::allocate(
- size_t size, sp<EffectBufferHalInterface>* buffer) {
- return mirror(nullptr, size, buffer);
-}
-
-status_t EffectBufferHalHidl::mirror(
- void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
- sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
- status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
- if (result == OK) {
- tempBuffer->setExternalData(external);
- *buffer = tempBuffer;
- }
- return result;
-}
-
-EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
- : mBufferSize(size), mFrameCountChanged(false),
- mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
- mHidlBuffer.id = makeUniqueId();
- mHidlBuffer.frameCount = 0;
-}
-
-EffectBufferHalHidl::~EffectBufferHalHidl() {
-}
-
-status_t EffectBufferHalHidl::init() {
- sp<IAllocator> ashmem = IAllocator::getService("ashmem");
- if (ashmem == 0) {
- ALOGE("Failed to retrieve ashmem allocator service");
- return NO_INIT;
- }
- status_t retval = NO_MEMORY;
- Return<void> result = ashmem->allocate(
- mBufferSize,
- [&](bool success, const hidl_memory& memory) {
- if (success) {
- mHidlBuffer.data = memory;
- retval = OK;
- }
- });
- if (result.isOk() && retval == OK) {
- mMemory = hardware::mapMemory(mHidlBuffer.data);
- if (mMemory != 0) {
- mMemory->update();
- mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
- memset(mAudioBuffer.raw, 0, mMemory->getSize());
- mMemory->commit();
- } else {
- ALOGE("Failed to map allocated ashmem");
- retval = NO_MEMORY;
- }
- } else {
- ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
- }
- return result.isOk() ? retval : FAILED_TRANSACTION;
-}
-
-audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
- return &mAudioBuffer;
-}
-
-void* EffectBufferHalHidl::externalData() const {
- return mExternalData;
-}
-
-void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
- mHidlBuffer.frameCount = frameCount;
- mAudioBuffer.frameCount = frameCount;
- mFrameCountChanged = true;
-}
-
-bool EffectBufferHalHidl::checkFrameCountChange() {
- bool result = mFrameCountChanged;
- mFrameCountChanged = false;
- return result;
-}
-
-void EffectBufferHalHidl::setExternalData(void* external) {
- mExternalData = external;
-}
-
-void EffectBufferHalHidl::update() {
- update(mBufferSize);
-}
-
-void EffectBufferHalHidl::commit() {
- commit(mBufferSize);
-}
-
-void EffectBufferHalHidl::update(size_t size) {
- if (mExternalData == nullptr) return;
- mMemory->update();
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mAudioBuffer.raw, mExternalData, size);
- mMemory->commit();
-}
-
-void EffectBufferHalHidl::commit(size_t size) {
- if (mExternalData == nullptr) return;
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mExternalData, mAudioBuffer.raw, size);
-}
-
-} // namespace android
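
EffectBufferHalHidl above mirrors an external buffer into ashmem-backed shared memory: update() pushes the external data into the shared region before processing, and commit() copies the processed data back out, both clamped to the allocated size. Below is a minimal sketch of that mirroring discipline, with a plain std::vector standing in for the shared-memory region; the class and method names are illustrative only.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Stand-in for the ashmem-backed effect buffer: external memory is mirrored into
// the shared region before processing (update) and copied back afterwards (commit).
class MirroredBuffer {
  public:
    explicit MirroredBuffer(std::size_t size) : mShared(size, 0), mExternal(nullptr) {}

    void setExternalData(void* external) { mExternal = external; }
    void* raw() { return mShared.data(); }

    void update(std::size_t size) {                 // external -> shared, clamped to allocation
        if (mExternal == nullptr) return;
        std::memcpy(mShared.data(), mExternal, std::min(size, mShared.size()));
    }
    void commit(std::size_t size) {                 // shared -> external, clamped to allocation
        if (mExternal == nullptr) return;
        std::memcpy(mExternal, mShared.data(), std::min(size, mShared.size()));
    }

  private:
    std::vector<uint8_t> mShared;
    void* mExternal;
};

int main() {
    char track[8] = "abcdefg";
    MirroredBuffer buffer(sizeof(track));
    buffer.setExternalData(track);

    buffer.update(sizeof(track));                   // push input into the "shared memory"
    static_cast<char*>(buffer.raw())[0] = 'A';      // pretend the effect processed in place
    buffer.commit(sizeof(track));                   // pull the result back into the caller's buffer

    std::printf("%s\n", track);                     // prints "Abcdefg"
    return 0;
}
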
diff --git a/media/libaudiohal/2.0/EffectHalHidl.cpp b/media/libaudiohal/2.0/EffectHalHidl.cpp
deleted file mode 100644
index 4fb032c..0000000
--- a/media/libaudiohal/2.0/EffectHalHidl.cpp
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <hwbinder/IPCThreadState.h>
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::effect::V2_0::AudioBuffer;
-using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
- : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
-}
-
-EffectHalHidl::~EffectHalHidl() {
- if (mEffect != 0) {
- close();
- mEffect.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-// static
-void EffectHalHidl::effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
- HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
- HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
- halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
- halDescriptor->cpuLoad = descriptor.cpuLoad;
- halDescriptor->memoryUsage = descriptor.memoryUsage;
- memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
- memcpy(halDescriptor->implementor,
- descriptor.implementor.data(), descriptor.implementor.size());
-}
-
-// TODO(mnaganov): These buffer conversion functions should be shared with Effect wrapper
-// via HidlUtils. Move them there when hardware/interfaces gets unfrozen again.
-
-// static
-void EffectHalHidl::effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config) {
- config->samplingRateHz = halConfig.samplingRate;
- config->channels = AudioChannelMask(halConfig.channels);
- config->format = AudioFormat(halConfig.format);
- config->accessMode = EffectBufferAccess(halConfig.accessMode);
- config->mask = EffectConfigParameters(halConfig.mask);
-}
-
-// static
-void EffectHalHidl::effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig) {
- halConfig->buffer.frameCount = 0;
- halConfig->buffer.raw = NULL;
- halConfig->samplingRate = config.samplingRateHz;
- halConfig->channels = static_cast<uint32_t>(config.channels);
- halConfig->bufferProvider.cookie = NULL;
- halConfig->bufferProvider.getBuffer = NULL;
- halConfig->bufferProvider.releaseBuffer = NULL;
- halConfig->format = static_cast<uint8_t>(config.format);
- halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
- halConfig->mask = static_cast<uint8_t>(config.mask);
-}
-
-// static
-void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
- effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
- effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
-}
-
-// static
-void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
- effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
- effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
-}
-
-// static
-status_t EffectHalHidl::analyzeResult(const Result& result) {
- switch (result) {
- case Result::OK: return OK;
- case Result::INVALID_ARGUMENTS: return BAD_VALUE;
- case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
- case Result::NOT_INITIALIZED: return NO_INIT;
- case Result::NOT_SUPPORTED: return INVALID_OPERATION;
- case Result::RESULT_TOO_BIG: return NO_MEMORY;
- default: return NO_INIT;
- }
-}
-
-status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (!mBuffersChanged) {
- if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
- mBuffersChanged = buffer.get() != mInBuffer.get();
- } else {
- mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
- }
- }
- mInBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (!mBuffersChanged) {
- if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
- mBuffersChanged = buffer.get() != mOutBuffer.get();
- } else {
- mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
- }
- }
- mOutBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::process() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
-}
-
-status_t EffectHalHidl::processReverse() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
-}
-
-status_t EffectHalHidl::prepareForProcessing() {
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- Return<void> ret = mEffect->prepareForProcessing(
- [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
- retval = r;
- if (retval == Result::OK) {
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
- }
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
- }
- if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for effects is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
- return NO_INIT;
- }
- mStatusMQ = std::move(tempStatusMQ);
- return OK;
-}
-
-bool EffectHalHidl::needToResetBuffers() {
- if (mBuffersChanged) return true;
- bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
- bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
- return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
-}
-
-status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
- if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
- status_t status;
- if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
- return status;
- }
- if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
- return status;
- }
- // The data is already in the buffers, just need to flush it and wake up the server side.
- std::atomic_thread_fence(std::memory_order_release);
- mEfGroup->wake(mqFlag);
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(
- static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
- Result retval = Result::NOT_INITIALIZED;
- mStatusMQ->read(&retval);
- if (retval == Result::OK || retval == Result::INVALID_STATE) {
- // Sync back the changed contents of the buffer.
- std::atomic_thread_fence(std::memory_order_acquire);
- }
- return analyzeResult(retval);
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t EffectHalHidl::setProcessBuffers() {
- Return<Result> ret = mEffect->setProcessBuffers(
- static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
- static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
- if (ret.isOk() && ret == Result::OK) {
- mBuffersChanged = false;
- return OK;
- }
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData) {
- if (mEffect == 0) return NO_INIT;
-
- // Special cases.
- if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
- return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
- return getConfigImpl(cmdCode, replySize, pReplyData);
- }
-
- // Common case.
- hidl_vec<uint8_t> hidlData;
- if (pCmdData != nullptr && cmdSize > 0) {
- hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
- }
- status_t status;
- uint32_t replySizeStub = 0;
- if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
- Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
- [&](int32_t s, const hidl_vec<uint8_t>& result) {
- status = s;
- if (status == 0) {
- if (*replySize > result.size()) *replySize = result.size();
- if (pReplyData != nullptr && *replySize > 0) {
- memcpy(pReplyData, &result[0], *replySize);
- }
- }
- });
- return ret.isOk() ? status : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
- if (mEffect == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffect->getDescriptor(
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- effectDescriptorToHal(result, pDescriptor);
- }
- });
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::close() {
- if (mEffect == 0) return NO_INIT;
- Return<Result> ret = mEffect->close();
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getConfigImpl(
- uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
- if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- status_t result = FAILED_TRANSACTION;
- Return<void> ret;
- if (cmdCode == EFFECT_CMD_GET_CONFIG) {
- ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- } else {
- ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- }
- if (!ret.isOk()) {
- result = FAILED_TRANSACTION;
- }
- return result;
-}
-
-status_t EffectHalHidl::setConfigImpl(
- uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
- if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
- replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
- if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
- ALOGE("Buffer provider callbacks are not supported");
- }
- EffectConfig hidlConfig;
- effectConfigFromHal(*halConfig, &hidlConfig);
- Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
- mEffect->setConfig(hidlConfig, nullptr, nullptr) :
- mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
- status_t result = FAILED_TRANSACTION;
- if (ret.isOk()) {
- result = analyzeResult(ret);
- *static_cast<int32_t*>(pReplyData) = result;
- }
- return result;
-}
-
-} // namespace android
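
EffectHalHidl::processImpl() above relies on a fast message queue plus an event flag: the client publishes its input with a release fence, wakes the server, waits for DONE_PROCESSING (retrying on EAGAIN/EINTR spurious wakeups), and then issues an acquire fence before reading the results back. The sketch below reproduces only that wake/wait/retry shape in plain C++; fakeWait() and the status values are invented stand-ins for EventFlag::wait() and the HIDL Result codes.

#include <atomic>
#include <cerrno>
#include <cstdio>

// Stand-in for EventFlag::wait(): spuriously fails once before reporting that
// the server side has finished processing.
constexpr unsigned DONE_PROCESSING = 1u << 0;

int fakeWait(unsigned requested, unsigned* state) {
    static int calls = 0;
    if (calls++ == 0) { *state = 0; return -EAGAIN; }       // spurious wakeup
    *state = requested;                                      // server signalled completion
    return 0;
}

// Mirrors the shape of EffectHalHidl::processImpl(): publish the input with a
// release fence, wait for the server (retrying on EAGAIN/EINTR), then acquire
// before reading the processed data back.
int processOnce(std::atomic<int>& sharedStatus) {
    std::atomic_thread_fence(std::memory_order_release);    // make the input data visible
    unsigned efState = 0;
    for (;;) {
        int ret = fakeWait(DONE_PROCESSING, &efState);
        if (efState & DONE_PROCESSING) {
            std::atomic_thread_fence(std::memory_order_acquire);  // see the server's writes
            return sharedStatus.load(std::memory_order_relaxed);  // status read back from the queue
        }
        if (ret == -EAGAIN || ret == -EINTR) continue;       // spurious wakeup: retry the wait
        return ret;                                          // real error from the wait
    }
}

int main() {
    std::atomic<int> status{0};                              // 0 stands for "processing OK"
    std::printf("process status: %d\n", processOnce(status));
    return 0;
}
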
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
deleted file mode 100644
index 0d40e6d..0000000
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectsFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <cutils/native_handle.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "EffectsFactoryHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::Uuid;
-using ::android::hardware::audio::effect::V2_0::IEffect;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
- mEffectsFactory = IEffectsFactory::getService();
- if (mEffectsFactory == 0) {
- ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
- exit(1);
- }
-}
-
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
-status_t EffectsFactoryHalHidl::queryAllDescriptors() {
- if (mEffectsFactory == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getAllDescriptors(
- [&](Result r, const hidl_vec<EffectDescriptor>& result) {
- retval = r;
- if (retval == Result::OK) {
- mLastDescriptors = result;
- }
- });
- if (ret.isOk()) {
- return retval == Result::OK ? OK : NO_INIT;
- }
- mLastDescriptors.resize(0);
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult == OK) {
- *pNumEffects = mLastDescriptors.size();
- }
- return queryResult;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- uint32_t index, effect_descriptor_t *pDescriptor) {
- // TODO: We need to somehow track changes on the server side,
- // or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
- if (mLastDescriptors.size() == 0) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult != OK) return queryResult;
- }
- if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
- EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
- return OK;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::createEffect(
- const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect) {
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->createEffect(
- hidlUuid, sessionId, ioId,
- [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
- retval = r;
- if (retval == Result::OK) {
- *effect = new EffectHalHidl(result, effectId);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
- if (mEffectsFactory == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
- return EffectBufferHalHidl::allocate(size, buffer);
-}
-
-status_t EffectsFactoryHalHidl::mirrorBuffer(void* external, size_t size,
- sp<EffectBufferHalInterface>* buffer) {
- return EffectBufferHalHidl::mirror(external, size, buffer);
-}
-
-
-} // namespace android
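
EffectsFactoryHalHidl above caches the full descriptor list on first use and serves index-based lookups from that cache (hence the TODO about tracking server-side changes). A compact sketch of the same lazy-cache-then-index pattern follows; the names and status values are hypothetical stand-ins, not the real factory API.

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Local stand-in status codes (values are arbitrary, not the real status_t ones).
enum { STATUS_OK = 0, STATUS_NO_INIT = -1, STATUS_NAME_NOT_FOUND = -2 };

// Stand-in for the HIDL factory call that retrieves every descriptor at once.
bool fetchAllDescriptors(std::vector<std::string>* out) {
    *out = {"equalizer", "reverb", "bassboost"};
    return true;
}

// Mirrors getDescriptor(index) above: fill the cache lazily, then index into it.
class DescriptorCache {
  public:
    int getDescriptor(std::size_t index, std::string* descriptor) {
        if (mCache.empty() && !fetchAllDescriptors(&mCache)) return STATUS_NO_INIT;
        if (index >= mCache.size()) return STATUS_NAME_NOT_FOUND;
        *descriptor = mCache[index];
        return STATUS_OK;
    }

  private:
    std::vector<std::string> mCache;   // refreshed only when empty, as in the code above
};

int main() {
    DescriptorCache cache;
    std::string name;
    if (cache.getDescriptor(1, &name) == STATUS_OK) {
        std::printf("effect[1] = %s\n", name.c_str());
    }
    std::printf("out of range -> %d\n", cache.getDescriptor(7, &name));
    return 0;
}
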
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
deleted file mode 100644
index 9869cd2..0000000
--- a/media/libaudiohal/2.0/StreamHalHidl.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IStreamOutCallback.h>
-#include <hwbinder/IPCThreadState.h>
-#include <mediautils/SchedulingPolicyService.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::audio::common::V2_0::ThreadInfo;
-using ::android::hardware::audio::V2_0::AudioDrain;
-using ::android::hardware::audio::V2_0::IStreamOutCallback;
-using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V2_0::MmapBufferInfo;
-using ::android::hardware::audio::V2_0::MmapPosition;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::audio::V2_0::TimeSpec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
-
-namespace android {
-
-StreamHalHidl::StreamHalHidl(IStream *stream)
- : ConversionHelperHidl("Stream"),
- mStream(stream),
- mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
- mCachedBufferSize(0) {
-
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- mStreamPowerLog.init(sr,
- static_cast<audio_channel_mask_t>(m),
- static_cast<audio_format_t>(f));
- });
- }
-}
-
-StreamHalHidl::~StreamHalHidl() {
- mStream = nullptr;
-}
-
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- if (!mStream) return NO_INIT;
- return processReturn("getSampleRate", mStream->getSampleRate(), rate);
-}
-
-status_t StreamHalHidl::getBufferSize(size_t *size) {
- if (!mStream) return NO_INIT;
- status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
- if (status == OK) {
- mCachedBufferSize = *size;
- }
- return status;
-}
-
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- if (!mStream) return NO_INIT;
- return processReturn("getChannelMask", mStream->getChannelMask(), mask);
-}
-
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
- if (!mStream) return NO_INIT;
- return processReturn("getFormat", mStream->getFormat(), format);
-}
-
-status_t StreamHalHidl::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- if (!mStream) return NO_INIT;
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- *sampleRate = sr;
- *mask = static_cast<audio_channel_mask_t>(m);
- *format = static_cast<audio_format_t>(f);
- });
- return processReturn("getAudioProperties", ret);
-}
-
-status_t StreamHalHidl::setParameters(const String8& kvPairs) {
- if (!mStream) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mStream->setParameters(hidlParams));
-}
-
-status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (!mStream) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mStream->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("addEffect", mStream->addEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("removeEffect", mStream->removeEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::standby() {
- if (!mStream) return NO_INIT;
- return processReturn("standby", mStream->standby());
-}
-
-status_t StreamHalHidl::dump(int fd) {
- if (!mStream) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- mStreamPowerLog.dump(fd);
- return processReturn("dump", ret);
-}
-
-status_t StreamHalHidl::start() {
- if (!mStream) return NO_INIT;
- return processReturn("start", mStream->start());
-}
-
-status_t StreamHalHidl::stop() {
- if (!mStream) return NO_INIT;
- return processReturn("stop", mStream->stop());
-}
-
-status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- Result retval;
- Return<void> ret = mStream->createMmapBuffer(
- minSizeFrames,
- [&](Result r, const MmapBufferInfo& hidlInfo) {
- retval = r;
- if (retval == Result::OK) {
- const native_handle *handle = hidlInfo.sharedMemory.handle();
- if (handle->numFds > 0) {
- info->shared_memory_fd = handle->data[0];
- info->buffer_size_frames = hidlInfo.bufferSizeFrames;
- info->burst_size_frames = hidlInfo.burstSizeFrames;
- // info->shared_memory_address is not needed in HIDL context
- info->shared_memory_address = NULL;
- } else {
- retval = Result::NOT_INITIALIZED;
- }
- }
- });
- return processReturn("createMmapBuffer", ret, retval);
-}
-
-status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
- Result retval;
- Return<void> ret = mStream->getMmapPosition(
- [&](Result r, const MmapPosition& hidlPosition) {
- retval = r;
- if (retval == Result::OK) {
- position->time_nanoseconds = hidlPosition.timeNanoseconds;
- position->position_frames = hidlPosition.positionFrames;
- }
- });
- return processReturn("getMmapPosition", ret, retval);
-}
-
-status_t StreamHalHidl::setHalThreadPriority(int priority) {
- mHalThreadPriority = priority;
- return OK;
-}
-
-status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
- if (mCachedBufferSize != 0) {
- *size = mCachedBufferSize;
- return OK;
- }
- return getBufferSize(size);
-}
-
-bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
- if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
- return true;
- }
- int err = requestPriority(
- threadPid, threadId,
- mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
- ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
- mHalThreadPriority, threadPid, threadId, err);
- // Audio will still work, but latency will be higher and sometimes unacceptable.
- return err == 0;
-}
-
-namespace {
-
-/* Notes on callback ownership.
-
-This is how the (Hw)Binder ownership model works. The server implementation
-is owned by the Binder framework (via sp<>). Proxies are owned by clients.
-When the last proxy disappears, the Binder framework releases the server impl.
-
-Thus, there is no need to keep any references to StreamOutCallback (this is
-the server impl) -- it will live as long as the HAL server holds a strong ref to
-the IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
-from the destructor of StreamOutHalHidl.
-
-The callback only keeps a weak reference to the stream. The stream is owned
-by AudioFlinger.
-
-*/
-
-struct StreamOutCallback : public IStreamOutCallback {
- StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
-
- // IStreamOutCallback implementation
- Return<void> onWriteReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onWriteReady();
- }
- return Void();
- }
-
- Return<void> onDrainReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onDrainReady();
- }
- return Void();
- }
-
- Return<void> onError() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onError();
- }
- return Void();
- }
-
- private:
- wp<StreamOutHalHidl> mStream;
-};
-
-} // namespace
-
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
-}
-
-StreamOutHalHidl::~StreamOutHalHidl() {
- if (mStream != 0) {
- if (mCallback.unsafe_get()) {
- processReturn("clearCallback", mStream->clearCallback());
- }
- processReturn("close", mStream->close());
- mStream.clear();
- }
- mCallback.clear();
- hardware::IPCThreadState::self()->flushCommands();
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamOutHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *latency = writeStatus.reply.latencyMs;
- });
- } else {
- return processReturn("getLatency", mStream->getLatency(), latency);
- }
-}
-
-status_t StreamOutHalHidl::setVolume(float left, float right) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setVolume", mStream->setVolume(left, right));
-}
-
-status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
- if (mStream == 0) return NO_INIT;
- *written = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
- ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
- return OK;
- }
-
- status_t status;
- if (!mDataMQ) {
- // If playback starts close to the end of a compressed track, the number of bytes
- // that needs to be written is less than the actual buffer size. The full buffer
- // size must be used for the MQ, since otherwise data will be truncated after
- // seeking back to the middle.
- size_t bufferSize;
- if ((status = getCachedBufferSize(&bufferSize)) != OK) {
- return status;
- }
- if (bytes > bufferSize) bufferSize = bytes;
- if ((status = prepareForWriting(bufferSize)) != OK) {
- return status;
- }
- }
-
- status = callWriterThread(
- WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
- [&] (const WriteStatus& writeStatus) {
- *written = writeStatus.reply.written;
- // Diagnostics of the cause of b/35813113.
- ALOGE_IF(*written > bytes,
- "hal reports more bytes written than asked for: %lld > %lld",
- (long long)*written, (long long)bytes);
- });
- mStreamPowerLog.log(buffer, *written);
- return status;
-}
-
-status_t StreamOutHalHidl::callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
- if (!mCommandMQ->write(&cmd)) {
- ALOGE("command message queue write failed for \"%s\"", cmdName);
- return -EAGAIN;
- }
- if (data != nullptr) {
- size_t availableToWrite = mDataMQ->availableToWrite();
- if (dataSize > availableToWrite) {
- ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
- (long long)dataSize, (long long)availableToWrite);
- dataSize = availableToWrite;
- }
- if (!mDataMQ->write(data, dataSize)) {
- ALOGE("data message queue write failed for \"%s\"", cmdName);
- }
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
- WriteStatus writeStatus;
- writeStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&writeStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (writeStatus.retval == Result::OK) {
- ret = OK;
- callback(writeStatus);
- } else {
- ret = processReturn(cmdName, writeStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
- Return<void> ret = mStream->prepareForWriting(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForWriting", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for writing is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for writing is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mWriterClient = gettid();
- return OK;
-}
-
-status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getRenderPosition(
- [&](Result r, uint32_t d) {
- retval = r;
- if (retval == Result::OK) {
- *dspFrames = d;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getNextWriteTimestamp(
- [&](Result r, int64_t t) {
- retval = r;
- if (retval == Result::OK) {
- *timestamp = t;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream == 0) return NO_INIT;
- status_t status = processReturn(
- "setCallback", mStream->setCallback(new StreamOutCallback(this)));
- if (status == OK) {
- mCallback = callback;
- }
- return status;
-}
-
-status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- if (mStream == 0) return NO_INIT;
- Return<void> ret = mStream->supportsPauseAndResume(
- [&](bool p, bool r) {
- *supportsPause = p;
- *supportsResume = r;
- });
- return processReturn("supportsPauseAndResume", ret);
-}
-
-status_t StreamOutHalHidl::pause() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->pause());
-}
-
-status_t StreamOutHalHidl::resume() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->resume());
-}
-
-status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
-}
-
-status_t StreamOutHalHidl::drain(bool earlyNotify) {
- if (mStream == 0) return NO_INIT;
- return processReturn(
- "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
-}
-
-status_t StreamOutHalHidl::flush() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->flush());
-}
-
-status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *frames = writeStatus.reply.presentationPosition.frames;
- timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
- timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getPresentationPosition(
- [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- timestamp->tv_sec = hidlTimeStamp.tvSec;
- timestamp->tv_nsec = hidlTimeStamp.tvNSec;
- }
- });
- return processReturn("getPresentationPosition", ret, retval);
- }
-}
-
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
- // Audio HAL V2.0 does not support propagating source metadata
- return INVALID_OPERATION;
-}
-
-void StreamOutHalHidl::onWriteReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onWriteReady");
- callback->onWriteReady();
-}
-
-void StreamOutHalHidl::onDrainReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onDrainReady");
- callback->onDrainReady();
-}
-
-void StreamOutHalHidl::onError() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onError");
- callback->onError();
-}
-
-
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
-}
-
-StreamInHalHidl::~StreamInHalHidl() {
- if (mStream != 0) {
- processReturn("close", mStream->close());
- mStream.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamInHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamInHalHidl::setGain(float gain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setGain", mStream->setGain(gain));
-}
-
-status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
- if (mStream == 0) return NO_INIT;
- *read = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
- return OK;
- }
-
- status_t status;
- if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
- return status;
- }
-
- ReadParameters params;
- params.command = ReadCommand::READ;
- params.params.read = bytes;
- status = callReaderThread(params, "read",
- [&](const ReadStatus& readStatus) {
- const size_t availToRead = mDataMQ->availableToRead();
- if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
- ALOGE("data message queue read failed for \"read\"");
- }
- ALOGW_IF(availToRead != readStatus.reply.read,
- "HAL read report inconsistent: mq = %d, status = %d",
- (int32_t)availToRead, (int32_t)readStatus.reply.read);
- *read = readStatus.reply.read;
- });
- mStreamPowerLog.log(buffer, *read);
- return status;
-}
-
-status_t StreamInHalHidl::callReaderThread(
- const ReadParameters& params, const char* cmdName,
- StreamInHalHidl::ReaderCallback callback) {
- if (!mCommandMQ->write(¶ms)) {
- ALOGW("command message queue write failed");
- return -EAGAIN;
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
- ReadStatus readStatus;
- readStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&readStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (readStatus.retval == Result::OK) {
- ret = OK;
- callback(readStatus);
- } else {
- ret = processReturn(cmdName, readStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
- Return<void> ret = mStream->prepareForReading(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForReading", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for writing is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for reading is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mReaderClient = gettid();
- return OK;
-}
-
-status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
-}
-
-status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream == 0) return NO_INIT;
- if (mReaderClient == gettid() && mCommandMQ) {
- ReadParameters params;
- params.command = ReadCommand::GET_CAPTURE_POSITION;
- return callReaderThread(params, "getCapturePosition",
- [&](const ReadStatus& readStatus) {
- *frames = readStatus.reply.capturePosition.frames;
- *time = readStatus.reply.capturePosition.time;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getCapturePosition(
- [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- *time = hidlTime;
- }
- });
- return processReturn("getCapturePosition", ret, retval);
- }
-}
-
-status_t StreamInHalHidl::getActiveMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- if (mStream == 0) return NO_INIT;
- return INVALID_OPERATION;
-}
-
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
- // Audio HAL V2.0 does not support propagating sink metadata
- return INVALID_OPERATION;
-}
-
-} // namespace android
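
The "Notes on callback ownership" block above explains why StreamOutCallback keeps only a weak reference to the stream: the callback must never extend the stream's lifetime, and events arriving after the stream is destroyed are simply dropped. The same promote-or-drop pattern can be sketched with std::weak_ptr in plain C++ (sp<>/wp<> in the real code are Android's strong/weak pointers; the class names below are invented for illustration).

#include <cstdio>
#include <memory>

struct Stream {
    void onWriteReady() { std::printf("stream notified\n"); }
};

// The callback keeps only a weak reference: it never extends the stream's
// lifetime, and it silently drops the event if the stream is already gone.
struct StreamCallback {
    explicit StreamCallback(const std::weak_ptr<Stream>& stream) : mStream(stream) {}

    void onWriteReady() {
        if (auto stream = mStream.lock()) {   // plays the role of wp<>::promote()
            stream->onWriteReady();
        }                                     // else: the stream was destroyed, ignore the event
    }

    std::weak_ptr<Stream> mStream;
};

int main() {
    auto stream = std::make_shared<Stream>();
    StreamCallback callback(stream);
    callback.onWriteReady();   // delivered while the stream is alive
    stream.reset();            // the owner (AudioFlinger in the real code) releases the stream
    callback.onWriteReady();   // safely dropped after destruction
    return 0;
}
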
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
deleted file mode 100644
index 98107e5..0000000
--- a/media/libaudiohal/2.0/StreamHalLocal.cpp
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
- : mDevice(device),
- mStream(stream) {
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- mStreamPowerLog.init(mStream->get_sample_rate(mStream),
- mStream->get_channels(mStream),
- mStream->get_format(mStream));
- }
-}
-
-StreamHalLocal::~StreamHalLocal() {
- mStream = 0;
- mDevice.clear();
-}
-
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
- *rate = mStream->get_sample_rate(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getBufferSize(size_t *size) {
- *size = mStream->get_buffer_size(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
- *mask = mStream->get_channels(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- *sampleRate = mStream->get_sample_rate(mStream);
- *mask = mStream->get_channels(mStream);
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::setParameters(const String8& kvPairs) {
- return mStream->set_parameters(mStream, kvPairs.string());
-}
-
-status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mStream->get_parameters(mStream, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
- LOG_ALWAYS_FATAL("Local streams can not have effects");
- return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
- LOG_ALWAYS_FATAL("Local streams can not have effects");
- return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::standby() {
- return mStream->standby(mStream);
-}
-
-status_t StreamHalLocal::dump(int fd) {
- status_t status = mStream->dump(mStream, fd);
- mStreamPowerLog.dump(fd);
- return status;
-}
-
-status_t StreamHalLocal::setHalThreadPriority(int) {
- // Nothing to do, as the local HAL is executed by AudioFlinger directly
- // on the same thread.
- return OK;
-}
-
-StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamOutHalLocal::~StreamOutHalLocal() {
- mCallback.clear();
- mDevice->closeOutputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamOutHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_out_frame_size(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
- *latency = mStream->get_latency(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::setVolume(float left, float right) {
- if (mStream->set_volume == NULL) return INVALID_OPERATION;
- return mStream->set_volume(mStream, left, right);
-}
-
-status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
- ssize_t writeResult = mStream->write(mStream, buffer, bytes);
- if (writeResult > 0) {
- *written = writeResult;
- mStreamPowerLog.log(buffer, *written);
- return OK;
- } else {
- *written = 0;
- return writeResult;
- }
-}
-
-status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
- return mStream->get_render_position(mStream, dspFrames);
-}
-
-status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
- return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
-status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream->set_callback == NULL) return INVALID_OPERATION;
- status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
- if (result == OK) {
- mCallback = callback;
- }
- return result;
-}
-
-// static
-int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
- // We act as if we had given a wp<StreamOutHalLocal> to the HAL. This way we correctly
- // handle the case where the callback is invoked while StreamOutHalLocal's destructor is
- // already running, because the destructor is invoked after the refcount has been atomically
- // decremented.
- wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
- sp<StreamOutHalLocal> self = weakSelf.promote();
- if (self == 0) return 0;
- sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
- if (callback == 0) return 0;
- ALOGV("asyncCallback() event %d", event);
- switch (event) {
- case STREAM_CBK_EVENT_WRITE_READY:
- callback->onWriteReady();
- break;
- case STREAM_CBK_EVENT_DRAIN_READY:
- callback->onDrainReady();
- break;
- case STREAM_CBK_EVENT_ERROR:
- callback->onError();
- break;
- default:
- ALOGW("asyncCallback() unknown event %d", event);
- break;
- }
- return 0;
-}
-
-status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- *supportsPause = mStream->pause != NULL;
- *supportsResume = mStream->resume != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::pause() {
- if (mStream->pause == NULL) return INVALID_OPERATION;
- return mStream->pause(mStream);
-}
-
-status_t StreamOutHalLocal::resume() {
- if (mStream->resume == NULL) return INVALID_OPERATION;
- return mStream->resume(mStream);
-}
-
-status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
- *supportsDrain = mStream->drain != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::drain(bool earlyNotify) {
- if (mStream->drain == NULL) return INVALID_OPERATION;
- return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
-}
-
-status_t StreamOutHalLocal::flush() {
- if (mStream->flush == NULL) return INVALID_OPERATION;
- return mStream->flush(mStream);
-}
-
-status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
- return mStream->get_presentation_position(mStream, frames, timestamp);
-}
-
-status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
- if (mStream->update_source_metadata == nullptr) {
- return INVALID_OPERATION;
- }
- const source_metadata_t metadata {
- .track_count = sourceMetadata.tracks.size(),
- // const cast is fine as it is in a const structure
- .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
- };
- mStream->update_source_metadata(mStream, &metadata);
- return OK;
-}
-
-status_t StreamOutHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamOutHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamInHalLocal::~StreamInHalLocal() {
- mDevice->closeInputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamInHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_in_frame_size(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::setGain(float gain) {
- return mStream->set_gain(mStream, gain);
-}
-
-status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
- ssize_t readResult = mStream->read(mStream, buffer, bytes);
- if (readResult > 0) {
- *read = readResult;
- mStreamPowerLog.log(buffer, *read);
- return OK;
- } else {
- *read = 0;
- return readResult;
- }
-}
-
-status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
- *framesLost = mStream->get_input_frames_lost(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
- return mStream->get_capture_position(mStream, frames, time);
-}
-
-status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
- if (mStream->update_sink_metadata == nullptr) {
- return INVALID_OPERATION;
- }
- const sink_metadata_t metadata {
- .track_count = sinkMetadata.tracks.size(),
- // const cast is fine as it is in a const structure
- .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
- };
- mStream->update_sink_metadata(mStream, &metadata);
- return OK;
-}
-
-status_t StreamInHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamInHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-status_t StreamInHalLocal::getActiveMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- return INVALID_OPERATION;
-}
-
-} // namespace android
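Note on the asyncCallback pattern removed above: the HAL only ever holds a raw cookie pointer, and safety during destruction comes from promoting a weak reference before use. Below is a minimal standalone sketch of the same idea using std::weak_ptr/std::shared_ptr instead of Android's wp<>/sp<>. In the deleted code the cookie is the raw `this` pointer and RefBase allows a wp<> to be rebuilt from it; this sketch passes an explicit std::weak_ptr as the cookie instead, and the class and function names are illustrative, not part of this change.

#include <iostream>
#include <memory>

class StreamOwner {
  public:
    void onWriteReady() { std::cout << "write ready\n"; }
};

// The cookie handed to the C-style callback is literally a weak reference:
// promoting it fails once the owner is gone, so a late callback is a no-op.
static void asyncCallback(void* cookie) {
    auto* weakSelf = static_cast<std::weak_ptr<StreamOwner>*>(cookie);
    if (auto self = weakSelf->lock()) {
        self->onWriteReady();
    }
}

int main() {
    auto owner = std::make_shared<StreamOwner>();
    std::weak_ptr<StreamOwner> cookie = owner;  // what we "give to the HAL"
    asyncCallback(&cookie);  // owner alive: callback is delivered
    owner.reset();           // owner destroyed
    asyncCallback(&cookie);  // late callback: lock() fails, nothing happens
    return 0;
}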
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
deleted file mode 100644
index 8823a8d..0000000
--- a/media/libaudiohal/4.0/ConversionHelperHidl.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/types.h>
-#include <hidl/HidlSupport.h>
-#include <system/audio.h>
-#include <utils/String8.h>
-
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::Return;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-namespace V4_0 {
-
-class ConversionHelperHidl {
- protected:
- static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
- static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
- static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
- static void microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst);
-
- ConversionHelperHidl(const char* className);
-
- template<typename R, typename T>
- status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
- if (ret.isOk()) {
- // This way it also works for enum class to unscoped enum conversion.
- *retval = static_cast<T>(static_cast<R>(ret));
- return OK;
- }
- return processReturn(funcName, ret);
- }
-
- template<typename T>
- status_t processReturn(const char* funcName, const Return<T>& ret) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? OK : FAILED_TRANSACTION;
- }
-
- status_t processReturn(const char* funcName, const Return<hardware::audio::V4_0::Result>& ret) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
- }
-
- template<typename T>
- status_t processReturn(
- const char* funcName, const Return<T>& ret, hardware::audio::V4_0::Result retval) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
- }
-
- private:
- const char* mClassName;
-
- static status_t analyzeResult(const hardware::audio::V4_0::Result& result);
-
- void emitError(const char* funcName, const char* description);
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
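The processReturn helpers deleted above rely on a double static_cast so one template handles both same-type returns and scoped-to-unscoped enum conversions. A simplified standalone sketch of that conversion idiom follows; it drops the hardware::Return<R> wrapper and the error path, and the enum names are illustrative only.

#include <cassert>
#include <cstdint>

// Scoped enum as a stand-in for a HIDL result type.
enum class HidlResult : int32_t { OK = 0, NOT_INITIALIZED = 1 };

// Unscoped legacy enum used on the caller's side.
enum legacy_result_t { LEGACY_OK = 0, LEGACY_NOT_INITIALIZED = 1 };

// Mirrors *retval = static_cast<T>(static_cast<R>(ret)): the inner cast
// extracts the source value as R, the outer one makes the scoped-enum-to-
// unscoped-enum (or enum-to-integer) conversion explicit.
template <typename T, typename R>
T convertReturn(R value) {
    return static_cast<T>(static_cast<R>(value));
}

int main() {
    legacy_result_t r = convertReturn<legacy_result_t>(HidlResult::OK);
    assert(r == LEGACY_OK);
    int raw = convertReturn<int>(HidlResult::NOT_INITIALIZED);
    assert(raw == 1);
    return 0;
}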
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
deleted file mode 100644
index 0bd2175..0000000
--- a/media/libaudiohal/4.0/DeviceHalHidl.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <android/hardware/audio/4.0/IPrimaryDevice.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- // List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- virtual status_t dump(int fd);
-
- private:
- friend class DevicesFactoryHalHidl;
- sp<IDevice> mDevice;
- sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
-
- // Can not be constructed directly by clients.
- explicit DeviceHalHidl(const sp<IDevice>& device);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
deleted file mode 100644
index 08341a4..0000000
--- a/media/libaudiohal/4.0/DeviceHalLocal.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-
-#include <hardware/audio.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal : public DeviceHalInterface
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- // List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- virtual status_t dump(int fd);
-
- void closeOutputStream(struct audio_stream_out *stream_out);
- void closeInputStream(struct audio_stream_in *stream_in);
-
- private:
- audio_hw_device_t *mDev;
-
- friend class DevicesFactoryHalLocal;
-
- // Can not be constructed directly by clients.
- explicit DeviceHalLocal(audio_hw_device_t *dev);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalLocal();
-
- uint32_t version() const { return mDev->common.version; }
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index c83194e..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- mDevicesFactory = IDevicesFactory::getService();
- if (mDevicesFactory != 0) {
- // It is assumed that DevicesFactory is owned by AudioFlinger
- // and thus has the same lifespan.
- mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
- } else {
- ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
- exit(1);
- }
- // The MSD factory is optional
- mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
- // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mDevicesFactory == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevicesFactory->openDevice(
- name,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
- else return NO_INIT;
- }
- return FAILED_TRANSACTION;
-}
-
-} // namespace V4_0
-} // namespace android
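DevicesFactoryHalHidl::openDevice above shows the usual shape of a generated HIDL call with results: the proxy invokes a completion callback before the call returns, and the caller captures the outputs by reference in a lambda, then maps the HAL Result to a status code. A toy, self-contained sketch of that shape follows; the types are stand-ins, not the generated HIDL classes.

#include <functional>
#include <iostream>
#include <memory>
#include <string>

enum class Result { OK, INVALID_ARGUMENTS, NOT_INITIALIZED };

struct Device { std::string name; };

// Stand-in for a generated proxy method: results are delivered through a
// completion callback that runs synchronously, before the call returns.
void openDevice(const std::string& name,
                const std::function<void(Result, std::shared_ptr<Device>)>& cb) {
    if (name.empty()) {
        cb(Result::INVALID_ARGUMENTS, nullptr);
    } else {
        cb(Result::OK, std::make_shared<Device>(Device{name}));
    }
}

int main() {
    Result retval = Result::NOT_INITIALIZED;
    std::shared_ptr<Device> device;
    openDevice("primary", [&](Result r, const std::shared_ptr<Device>& d) {
        retval = r;
        if (retval == Result::OK) device = d;
    });
    std::cout << (retval == Result::OK ? "opened " + device->name
                                       : std::string("open failed")) << "\n";
    return 0;
}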
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h b/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
deleted file mode 100644
index 114889b..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevicesFactory;
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalHybrid;
-
- sp<IDevicesFactory> mDevicesFactory;
- sp<IDevicesFactory> mDevicesFactoryMsd;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHidl();
-
- virtual ~DevicesFactoryHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
deleted file mode 100644
index 7ff1ec7d..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalHybrid"
-//#define LOG_NDEBUG 0
-
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
-#include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
- : mLocalFactory(new DevicesFactoryHalLocal()),
- mHidlFactory(new DevicesFactoryHalHidl()) {
-}
-
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
-status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
- strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
- return mHidlFactory->openDevice(name, device);
- }
- return mLocalFactory->openDevice(name, device);
-}
-
-} // namespace V4_0
-} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h b/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
deleted file mode 100644
index bc1c521..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalLocal.h"
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalHybrid;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalLocal() {}
-
- virtual ~DevicesFactoryHalLocal() {}
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.h b/media/libaudiohal/4.0/EffectBufferHalHidl.h
deleted file mode 100644
index 6d578c6..0000000
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidl/HidlSupport.h>
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-using android::hardware::audio::effect::V4_0::AudioBuffer;
-using android::hardware::hidl_memory;
-using android::hidl::memory::V1_0::IMemory;
-
-namespace android {
-namespace V4_0 {
-
-class EffectBufferHalHidl : public EffectBufferHalInterface
-{
- public:
- static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
- static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
-
- virtual audio_buffer_t* audioBuffer();
- virtual void* externalData() const;
-
- virtual size_t getSize() const override { return mBufferSize; }
-
- virtual void setExternalData(void* external);
- virtual void setFrameCount(size_t frameCount);
- virtual bool checkFrameCountChange();
-
- virtual void update();
- virtual void commit();
- virtual void update(size_t size);
- virtual void commit(size_t size);
-
- const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
-
- private:
- friend class EffectBufferHalInterface;
-
- static uint64_t makeUniqueId();
-
- const size_t mBufferSize;
- bool mFrameCountChanged;
- void* mExternalData;
- AudioBuffer mHidlBuffer;
- sp<IMemory> mMemory;
- audio_buffer_t mAudioBuffer;
-
- // Can not be constructed directly by clients.
- explicit EffectBufferHalHidl(size_t size);
-
- virtual ~EffectBufferHalHidl();
-
- status_t init();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.h b/media/libaudiohal/4.0/EffectHalHidl.h
deleted file mode 100644
index 5a4dab1..0000000
--- a/media/libaudiohal/4.0/EffectHalHidl.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffect.h>
-#include <media/audiohal/EffectHalInterface.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <system/audio_effect.h>
-
-using ::android::hardware::audio::effect::V4_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V4_0::EffectConfig;
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-
-namespace android {
-namespace V4_0 {
-
-class EffectHalHidl : public EffectHalInterface
-{
- public:
- // Set the input buffer.
- virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Set the output buffer.
- virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Effect process function.
- virtual status_t process();
-
- // Process reverse stream function. This function is used to pass
- // a reference stream to the effect engine.
- virtual status_t processReverse();
-
- // Send a command and receive a response to/from effect engine.
- virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
-
- // Returns the effect descriptor.
- virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
-
- // Free resources on the remote side.
- virtual status_t close();
-
- // Whether it's a local implementation.
- virtual bool isLocal() const { return false; }
-
- uint64_t effectId() const { return mEffectId; }
-
- static void effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
-
- private:
- friend class EffectsFactoryHalHidl;
- typedef MessageQueue<
- hardware::audio::effect::V4_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
-
- sp<IEffect> mEffect;
- const uint64_t mEffectId;
- sp<EffectBufferHalInterface> mInBuffer;
- sp<EffectBufferHalInterface> mOutBuffer;
- bool mBuffersChanged;
- std::unique_ptr<StatusMQ> mStatusMQ;
- EventFlag* mEfGroup;
-
- static status_t analyzeResult(const hardware::audio::effect::V4_0::Result& result);
- static void effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config);
- static void effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig);
- static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
- static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
-
- // Can not be constructed directly by clients.
- EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
-
- // The destructor automatically releases the effect.
- virtual ~EffectHalHidl();
-
- status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
- status_t prepareForProcessing();
- bool needToResetBuffers();
- status_t processImpl(uint32_t mqFlag);
- status_t setConfigImpl(
- uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
- status_t setProcessBuffers();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
deleted file mode 100644
index 2dda0f8..0000000
--- a/media/libaudiohal/4.0/StreamHalHidl.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-
-#include <atomic>
-
-#include <android/hardware/audio/4.0/IStream.h>
-#include <android/hardware/audio/4.0/IStreamIn.h>
-#include <android/hardware/audio/4.0/IStreamOut.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <media/audiohal/StreamHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-#include "StreamPowerLog.h"
-
-using ::android::hardware::audio::V4_0::IStream;
-using ::android::hardware::audio::V4_0::IStreamIn;
-using ::android::hardware::audio::V4_0::IStreamOut;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V4_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V4_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V4_0::IStreamOut::WriteStatus;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl;
-
-class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
-{
- public:
- // Return the sampling rate in Hz - e.g. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return size of input/output buffer in bytes for this stream - e.g. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- explicit StreamHalHidl(IStream *stream);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalHidl();
-
- status_t getCachedBufferSize(size_t *size);
-
- bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- const int HAL_THREAD_PRIORITY_DEFAULT = -1;
- IStream *mStream;
- int mHalThreadPriority;
- size_t mCachedBufferSize;
-};
-
-class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per audio frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies to the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies to the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Called when the metadata of the stream's source has been changed.
- status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
- // Methods used by StreamOutCallback (HIDL).
- void onWriteReady();
- void onDrainReady();
- void onError();
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- wp<StreamOutHalInterfaceCallback> mCallback;
- sp<IStreamOut> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mWriterClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamOutHalHidl(const sp<IStreamOut>& stream);
-
- virtual ~StreamOutHalHidl();
-
- using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
- status_t callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, WriterCallback callback);
- status_t prepareForWriting(size_t bufferSize);
-};
-
-class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per audio frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- // Get active microphones
- virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- // Called when the metadata of the stream's sink has been changed.
- status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- sp<IStreamIn> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mReaderClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamInHalHidl(const sp<IStreamIn>& stream);
-
- virtual ~StreamInHalHidl();
-
- using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
- status_t callReaderThread(
- const ReadParameters& params, const char* cmdName, ReaderCallback callback);
- status_t prepareForReading(size_t bufferSize);
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
deleted file mode 100644
index 7237509..0000000
--- a/media/libaudiohal/4.0/StreamHalLocal.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/StreamHalInterface.h>
-#include "StreamPowerLog.h"
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal;
-
-class StreamHalLocal : public virtual StreamHalInterface
-{
- public:
- // Return the sampling rate in Hz - e.g. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return size of input/output buffer in bytes for this stream - e.g. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start() = 0;
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop() = 0;
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) = 0;
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalLocal();
-
- sp<DeviceHalLocal> mDevice;
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- audio_stream_t *mStream;
-};
-
-class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per audio frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies to the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies to the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Called when the metadata of the stream's source has been changed.
- status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
- private:
- audio_stream_out_t *mStream;
- wp<StreamOutHalInterfaceCallback> mCallback;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamOutHalLocal();
-
- static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
-};
-
-class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per audio frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Get active microphones
- virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- // Called when the metadata of the stream's sink has been changed.
- status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
- private:
- audio_stream_in_t *mStream;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamInHalLocal();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamPowerLog.h b/media/libaudiohal/4.0/StreamPowerLog.h
deleted file mode 100644
index 57b7201..0000000
--- a/media/libaudiohal/4.0/StreamPowerLog.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-#define ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-
-#include <audio_utils/clock.h>
-#include <audio_utils/PowerLog.h>
-#include <cutils/properties.h>
-#include <system/audio.h>
-
-namespace android {
-namespace V4_0 {
-
-class StreamPowerLog {
-public:
- StreamPowerLog() :
- mIsUserDebugOrEngBuild(is_userdebug_or_eng_build()),
- mPowerLog(nullptr),
- mFrameSize(0) {
- // use init() to set up the power log.
- }
-
- ~StreamPowerLog() {
- power_log_destroy(mPowerLog); // OK for null mPowerLog
- mPowerLog = nullptr;
- }
-
- // A one-time initialization (do not call twice) before using StreamPowerLog.
- void init(uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format) {
- if (mPowerLog == nullptr) {
- // Note: this computes the channel count for both input and output channel masks,
- // but does not check the validity of the channel mask.
- const uint32_t channelCount = popcount(audio_channel_mask_get_bits(channelMask));
- mFrameSize = channelCount * audio_bytes_per_sample(format);
- if (mFrameSize > 0) {
- const size_t kPowerLogFramesPerEntry =
- (long long)sampleRate * kPowerLogSamplingIntervalMs / 1000;
- mPowerLog = power_log_create(
- sampleRate,
- channelCount,
- format,
- kPowerLogEntries,
- kPowerLogFramesPerEntry);
- }
- }
- // mPowerLog may be NULL (not the right build, format not accepted, etc.).
- }
-
- // Dump the power log to fd.
- void dump(int fd) const {
- // OK for null mPowerLog
- (void)power_log_dump(
- mPowerLog, fd, " " /* prefix */, kPowerLogLines, 0 /* limit_ns */);
- }
-
- // Log the audio data contained in buffer.
- void log(const void *buffer, size_t sizeInBytes) const {
- if (mPowerLog != nullptr) { // mFrameSize is always nonzero if mPowerLog exists.
- power_log_log(
- mPowerLog, buffer, sizeInBytes / mFrameSize, audio_utils_get_real_time_ns());
- }
- }
-
- bool isUserDebugOrEngBuild() const {
- return mIsUserDebugOrEngBuild;
- }
-
-private:
-
- static inline bool is_userdebug_or_eng_build() {
- char value[PROPERTY_VALUE_MAX];
- (void)property_get("ro.build.type", value, "unknown"); // ignore actual length
- return strcmp(value, "userdebug") == 0 || strcmp(value, "eng") == 0;
- }
-
- // Audio signal power log configuration.
- static const size_t kPowerLogLines = 40;
- static const size_t kPowerLogSamplingIntervalMs = 50;
- static const size_t kPowerLogEntries = (1 /* minutes */ * 60 /* seconds */ * 1000 /* msec */
- / kPowerLogSamplingIntervalMs);
-
- const bool mIsUserDebugOrEngBuild;
- power_log_t *mPowerLog;
- size_t mFrameSize;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
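For reference, the deleted StreamPowerLog above is driven from three places in the stream wrappers: init() once the stream format is known, log() on every buffer in the read/write path, and dump() from the stream's dump(). A short usage sketch follows, assuming an AOSP build where this header and <system/audio.h> are available; exampleUsage and the literal format values are illustrative only.

#include <stddef.h>
#include <system/audio.h>
#include "StreamPowerLog.h"

void exampleUsage(int dumpFd, const void* pcmData, size_t sizeInBytes) {
    android::V4_0::StreamPowerLog powerLog;

    // One-time setup once the stream's configuration is known.
    powerLog.init(48000 /* sampleRate */,
                  AUDIO_CHANNEL_OUT_STEREO,
                  AUDIO_FORMAT_PCM_16_BIT);

    // Called with every buffer that moves through the write (or read) path.
    powerLog.log(pcmData, sizeInBytes);

    // Called from the stream's dump() so recent signal power appears there.
    powerLog.dump(dumpFd);
}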
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
deleted file mode 100644
index abf6de0..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHybrid();
-
- virtual ~DevicesFactoryHalHybrid();
-
- sp<DevicesFactoryHalInterface> mLocalFactory;
- sp<DevicesFactoryHalInterface> mHidlFactory;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
deleted file mode 100644
index 680b7a1..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffectsFactory;
-using ::android::hardware::hidl_vec;
-
-class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
-{
- public:
- // Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
- // Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
-
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
-
- // Creates an effect engine of the specified type.
- // To release the effect engine, it is necessary to release references
- // to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect);
-
- virtual status_t dumpEffects(int fd);
-
- status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
- status_t mirrorBuffer(void* external, size_t size,
- sp<EffectBufferHalInterface>* buffer) override;
-
- private:
- friend class EffectsFactoryHalInterface;
-
- sp<IEffectsFactory> mEffectsFactory;
- hidl_vec<EffectDescriptor> mLastDescriptors;
-
- // Can not be constructed directly by clients.
- EffectsFactoryHalHidl();
- virtual ~EffectsFactoryHalHidl();
-
- status_t queryAllDescriptors();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 3a5df27..0ff0d4a 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -8,6 +8,7 @@
cflags: [
"-Wall",
+ "-Wextra",
"-Werror",
],
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index 4c8eaf6..e631ace 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -17,18 +17,17 @@
#include <android/hardware/audio/2.0/IDevicesFactory.h>
#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <DevicesFactoryHalHybrid.h>
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
+#include <libaudiohal/FactoryHalHidl.h>
namespace android {
// static
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
- return new V4_0::DevicesFactoryHalHybrid();
+ return V4_0::createDevicesFactoryHal();
}
if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
- return new DevicesFactoryHalHybrid();
+ return V2_0::createDevicesFactoryHal();
}
return nullptr;
}
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index ead1fa2..f7734a8 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -17,19 +17,17 @@
#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <EffectsFactoryHalHidl.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
-
+#include <libaudiohal/FactoryHalHidl.h>
namespace android {
// static
sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
- return new V4_0::EffectsFactoryHalHidl();
+ return V4_0::createEffectsFactoryHal();
}
if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
- return new EffectsFactoryHalHidl();
+ return V2_0::createEffectsFactoryHal();
}
return nullptr;
}
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/impl/Android.bp
similarity index 68%
rename from media/libaudiohal/4.0/Android.bp
rename to media/libaudiohal/impl/Android.bp
index 833defa..3827336 100644
--- a/media/libaudiohal/4.0/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -1,5 +1,5 @@
-cc_library_shared {
- name: "libaudiohal@4.0",
+cc_defaults {
+ name: "libaudiohal_default",
srcs: [
"DeviceHalLocal.cpp",
@@ -24,28 +24,30 @@
"-Werror",
],
shared_libs: [
+ "android.hardware.audio.common-util",
+ "android.hardware.audio.common@2.0",
+ "android.hardware.audio.common@4.0",
+ "android.hardware.audio.effect@2.0",
+ "android.hardware.audio.effect@4.0",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio@4.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
"libaudiohal_deathhandler",
"libaudioutils",
+ "libbase",
"libbinder",
"libcutils",
- "liblog",
- "libutils",
- "libhardware",
- "libbase",
"libfmq",
- "libhwbinder",
+ "libhardware",
"libhidlbase",
"libhidlmemory",
"libhidltransport",
- "android.hardware.audio@4.0",
- "android.hardware.audio.common-util",
- "android.hardware.audio.common@4.0",
- "android.hardware.audio.common@4.0-util",
- "android.hardware.audio.effect@4.0",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
+ "libhwbinder",
+ "liblog",
"libmedia_helper",
"libmediautils",
+ "libutils",
],
header_libs: [
"android.hardware.audio.common.util@all-versions",
@@ -56,3 +58,29 @@
"libfmq",
],
}
+
+cc_library_shared {
+ name: "libaudiohal@2.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@2.0-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=2",
+ "-DMINOR_VERSION=0",
+ "-include VersionMacro.h",
+ ]
+}
+
+cc_library_shared {
+ name: "libaudiohal@4.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@4.0-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=4",
+ "-DMINOR_VERSION=0",
+ "-include VersionMacro.h",
+ ]
+}
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
similarity index 90%
rename from media/libaudiohal/4.0/ConversionHelperHidl.cpp
rename to media/libaudiohal/impl/ConversionHelperHidl.cpp
index fe27504..5d12fad 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -22,15 +22,18 @@
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
-using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
-using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::Result;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneLocation;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+#endif
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
// static
status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
@@ -106,8 +109,9 @@
ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
}
+#if MAJOR_VERSION == 4
// TODO: Use the same implementation in the hal when it moves to a util library.
-std::string deviceAddressToHal(const DeviceAddress& address) {
+static std::string deviceAddressToHal(const DeviceAddress& address) {
// HAL assumes that the address is NUL-terminated.
char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
memset(halAddress, 0, sizeof(halAddress));
@@ -141,7 +145,7 @@
//local conversion helpers
-audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+static audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
switch (mapping) {
case AudioMicrophoneChannelMapping::UNUSED:
return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
@@ -154,7 +158,7 @@
}
}
-audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+static audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
switch (location) {
case AudioMicrophoneLocation::UNKNOWN:
return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
@@ -168,7 +172,7 @@
LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
}
}
-audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+static audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
switch (dir) {
case AudioMicrophoneDirectionality::UNKNOWN:
return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
@@ -187,9 +191,8 @@
}
}
-// static
-void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst) {
+void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst) {
if (pDst != NULL) {
snprintf(pDst->device_id, sizeof(pDst->device_id),
"%s", src.deviceId.c_str());
@@ -232,6 +235,7 @@
pDst->orientation.z = src.orientation.z;
}
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
similarity index 78%
rename from media/libaudiohal/2.0/ConversionHelperHidl.h
rename to media/libaudiohal/impl/ConversionHelperHidl.h
index c356f37..1a9319f 100644
--- a/media/libaudiohal/2.0/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -18,15 +18,20 @@
#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
#include <android/hardware/audio/2.0/types.h>
+#include <android/hardware/audio/4.0/types.h>
#include <hidl/HidlSupport.h>
+#include <system/audio.h>
#include <utils/String8.h>
-using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
+
using ::android::hardware::Return;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
namespace android {
+namespace CPP_VERSION {
class ConversionHelperHidl {
protected:
@@ -54,7 +59,7 @@
return ret.isOk() ? OK : FAILED_TRANSACTION;
}
- status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
+ status_t processReturn(const char* funcName, const Return<CoreResult>& ret) {
if (!ret.isOk()) {
emitError(funcName, ret.description().c_str());
}
@@ -63,7 +68,7 @@
template<typename T>
status_t processReturn(
- const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
+ const char* funcName, const Return<T>& ret, CoreResult retval) {
if (!ret.isOk()) {
emitError(funcName, ret.description().c_str());
}
@@ -73,11 +78,18 @@
private:
const char* mClassName;
- static status_t analyzeResult(const hardware::audio::V2_0::Result& result);
+ static status_t analyzeResult(const CoreResult& result);
void emitError(const char* funcName, const char* description);
};
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst);
+#endif
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
similarity index 88%
rename from media/libaudiohal/4.0/DeviceHalHidl.cpp
rename to media/libaudiohal/impl/DeviceHalHidl.cpp
index 6facca9..723e2eb 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "DeviceHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
#include <android/hardware/audio/4.0/IPrimaryDevice.h>
#include <cutils/native_handle.h>
#include <hwbinder/IPCThreadState.h>
@@ -31,27 +32,30 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::V4_0::AudioConfig;
-using ::android::hardware::audio::common::V4_0::AudioDevice;
-using ::android::hardware::audio::common::V4_0::AudioInputFlag;
-using ::android::hardware::audio::common::V4_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V4_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V4_0::AudioPort;
-using ::android::hardware::audio::common::V4_0::AudioPortConfig;
-using ::android::hardware::audio::common::V4_0::AudioMode;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioDevice;
+using ::android::hardware::audio::common::CPP_VERSION::AudioInputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioOutputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPatchHandle;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioMode;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
using ::android::hardware::audio::common::utils::mkEnumConverter;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::SinkMetadata;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::SinkMetadata;
+#endif
+
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
namespace {
@@ -259,7 +263,9 @@
hidlDevice,
hidlConfig,
mkEnumConverter<AudioOutputFlag>(flags),
+#if MAJOR_VERSION == 4
{} /* metadata */,
+#endif
[&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
@@ -285,15 +291,19 @@
AudioConfig hidlConfig;
HidlUtils::audioConfigFromHal(*config, &hidlConfig);
Result retval = Result::NOT_INITIALIZED;
+#if MAJOR_VERSION == 2
+ auto sourceMetadata = AudioSource(source);
+#elif MAJOR_VERSION == 4
// TODO: correctly propagate the tracks sources and volume
// for now, only send the main source at 1dbfs
- SinkMetadata metadata = {{{AudioSource(source), 1}}};
+ SinkMetadata sourceMetadata = {{{AudioSource(source), 1}}};
+#endif
Return<void> ret = mDevice->openInputStream(
handle,
hidlDevice,
hidlConfig,
- flags,
- metadata,
+ mkEnumConverter<AudioInputFlag>(flags),
+ sourceMetadata,
[&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
@@ -359,6 +369,13 @@
return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
}
+#if MAJOR_VERSION == 2
+status_t DeviceHalHidl::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+ if (mDevice == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
if (mDevice == 0) return NO_INIT;
Result retval;
@@ -375,6 +392,7 @@
});
return processReturn("getMicrophones", ret, retval);
}
+#endif
status_t DeviceHalHidl::dump(int fd) {
if (mDevice == 0) return NO_INIT;
@@ -385,5 +403,5 @@
return processReturn("dump", ret);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
similarity index 94%
rename from media/libaudiohal/2.0/DeviceHalHidl.h
rename to media/libaudiohal/impl/DeviceHalHidl.h
index 3c1cb59..fb5e7e7 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -18,16 +18,19 @@
#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <android/hardware/audio/4.0/IPrimaryDevice.h>
#include <media/audiohal/DeviceHalInterface.h>
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
using ::android::hardware::Return;
namespace android {
+namespace CPP_VERSION {
class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
{
@@ -124,6 +127,7 @@
virtual ~DeviceHalHidl();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
similarity index 96%
rename from media/libaudiohal/4.0/DeviceHalLocal.cpp
rename to media/libaudiohal/impl/DeviceHalLocal.cpp
index a245dd9..14e26f5 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -23,7 +23,7 @@
#include "StreamHalLocal.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
: mDev(dev) {
@@ -185,6 +185,12 @@
return INVALID_OPERATION;
}
+#if MAJOR_VERSION == 2
+status_t DeviceHalLocal::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
if (mDev->get_microphones == NULL) return INVALID_OPERATION;
size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -196,6 +202,7 @@
}
return status;
}
+#endif
status_t DeviceHalLocal::dump(int fd) {
return mDev->dump(mDev, fd);
@@ -209,5 +216,5 @@
mDev->close_input_stream(mDev, stream_in);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
similarity index 98%
rename from media/libaudiohal/2.0/DeviceHalLocal.h
rename to media/libaudiohal/impl/DeviceHalLocal.h
index aec201a..18bd879 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -21,6 +21,7 @@
#include <media/audiohal/DeviceHalInterface.h>
namespace android {
+namespace CPP_VERSION {
class DeviceHalLocal : public DeviceHalInterface
{
@@ -122,6 +123,7 @@
uint32_t version() const { return mDev->common.version; }
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..28001da
--- /dev/null
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <vector>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::Return;
+
+namespace android {
+namespace CPP_VERSION {
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+ sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
+ if (!defaultFactory) {
+ ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
+ exit(1);
+ }
+ mDeviceFactories.push_back(defaultFactory);
+ if (MAJOR_VERSION >= 4) {
+ // The MSD factory is optional and only available starting at HAL 4.0
+ sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
+ if (msdFactory) {
+ mDeviceFactories.push_back(msdFactory);
+ }
+ }
+ for (const auto& factory : mDeviceFactories) {
+ // It is assumed that the DevicesFactoryHalInterface instance is owned
+ // by AudioFlinger and thus has the same lifespan.
+ factory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ }
+}
+
+
+#if MAJOR_VERSION == 2
+static IDevicesFactory::Device idFromHal(const char *name, status_t* status) {
+ *status = OK;
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+ return IDevicesFactory::Device::PRIMARY;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+ return IDevicesFactory::Device::A2DP;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+ return IDevicesFactory::Device::USB;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+ return IDevicesFactory::Device::R_SUBMIX;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+ return IDevicesFactory::Device::STUB;
+ }
+ ALOGE("Invalid device name %s", name);
+ *status = BAD_VALUE;
+ return {};
+}
+#elif MAJOR_VERSION == 4
+static const char* idFromHal(const char *name, status_t* status) {
+ *status = OK;
+ return name;
+}
+#endif
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mDeviceFactories.empty()) return NO_INIT;
+ status_t status;
+ auto hidlId = idFromHal(name, &status);
+ if (status != OK) return status;
+ Result retval = Result::NOT_INITIALIZED;
+ for (const auto& factory : mDeviceFactories) {
+ Return<void> ret = factory->openDevice(
+ hidlId,
+ [&](Result r, const sp<IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ if (!ret.isOk()) return FAILED_TRANSACTION;
+ switch (retval) {
+ // Device was found and was initialized successfully.
+ case Result::OK: return OK;
+ // Device was found but failed to initialize.
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ // Otherwise continue iterating.
+ default: ;
+ }
+ }
+ ALOGW("The specified device name is not recognized: \"%s\"", name);
+ return BAD_VALUE;
+}
+
+} // namespace CPP_VERSION
+} // namespace android
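A minimal usage sketch (hypothetical, not part of this patch) of the factory added above: the module name is translated by idFromHal() (an IDevicesFactory::Device enum on HAL 2.0, the name itself on 4.0), and each registered IDevicesFactory is tried until one reports Result::OK.

    sp<DeviceHalInterface> device;
    sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
    // "primary" is the value of AUDIO_HARDWARE_MODULE_ID_PRIMARY.
    if (factory != nullptr && factory->openDevice("primary", &device) == OK) {
        // device now wraps the HIDL IDevice selected above (or a legacy local
        // device when the hybrid factory falls back to DevicesFactoryHalLocal).
    }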
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/DevicesFactoryHalHidl.h
rename to media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 0748849..a4282b0 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -18,15 +18,17 @@
#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <android/hardware/audio/4.0/IDevicesFactory.h>
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include "DeviceHalHidl.h"
-using ::android::hardware::audio::V2_0::IDevicesFactory;
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
{
@@ -38,17 +40,15 @@
private:
friend class DevicesFactoryHalHybrid;
- sp<IDevicesFactory> mDevicesFactory;
- sp<IDevicesFactory> mDevicesFactoryMsd;
-
- static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+ std::vector<sp<IDevicesFactory>> mDeviceFactories;
// Can not be constructed directly by clients.
DevicesFactoryHalHidl();
- virtual ~DevicesFactoryHalHidl();
+ virtual ~DevicesFactoryHalHidl() = default;
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
similarity index 95%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index 1c4be74..f337a8b 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -22,15 +22,13 @@
#include "DevicesFactoryHalHidl.h"
namespace android {
+namespace CPP_VERSION {
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
: mLocalFactory(new DevicesFactoryHalLocal()),
mHidlFactory(new DevicesFactoryHalHidl()) {
}
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
@@ -39,4 +37,5 @@
return mLocalFactory->openDevice(name, device);
}
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
similarity index 89%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index abd57d6..5ac0d0d 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -22,26 +22,27 @@
#include <utils/RefBase.h>
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
{
public:
+ DevicesFactoryHalHybrid();
+
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
private:
- friend class DevicesFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHybrid();
-
- virtual ~DevicesFactoryHalHybrid();
-
sp<DevicesFactoryHalInterface> mLocalFactory;
sp<DevicesFactoryHalInterface> mHidlFactory;
};
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
+ return new DevicesFactoryHalHybrid();
+}
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
index e54edd4..af67ff5 100644
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
@@ -26,7 +26,7 @@
#include "DevicesFactoryHalLocal.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
@@ -67,5 +67,5 @@
return rc;
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
similarity index 96%
rename from media/libaudiohal/2.0/DevicesFactoryHalLocal.h
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.h
index b9d18ab..5d108dd 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -24,6 +24,7 @@
#include "DeviceHalLocal.h"
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
{
@@ -41,6 +42,7 @@
virtual ~DevicesFactoryHalLocal() {}
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
similarity index 98%
rename from media/libaudiohal/4.0/EffectBufferHalHidl.cpp
rename to media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 957c89f..6ef4e8a 100644
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -30,7 +30,7 @@
using ::android::hidl::allocator::V1_0::IAllocator;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
// static
uint64_t EffectBufferHalHidl::makeUniqueId() {
@@ -142,5 +142,5 @@
memcpy(mExternalData, mAudioBuffer.raw, size);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/EffectBufferHalHidl.h
rename to media/libaudiohal/impl/EffectBufferHalHidl.h
index 31e0087..029d71a 100644
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -18,16 +18,18 @@
#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidl/HidlSupport.h>
#include <media/audiohal/EffectBufferHalInterface.h>
#include <system/audio_effect.h>
-using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
using android::hardware::hidl_memory;
using android::hidl::memory::V1_0::IMemory;
namespace android {
+namespace CPP_VERSION {
class EffectBufferHalHidl : public EffectBufferHalInterface
{
@@ -71,6 +73,7 @@
status_t init();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
similarity index 95%
rename from media/libaudiohal/4.0/EffectHalHidl.cpp
rename to media/libaudiohal/impl/EffectHalHidl.cpp
index c99c4c8..12649a1 100644
--- a/media/libaudiohal/4.0/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -22,26 +22,25 @@
#include <media/EffectsFactoryApi.h>
#include <utils/Log.h>
-#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::effect::V4_0::AudioBuffer;
-using ::android::hardware::audio::effect::V4_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V4_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V4_0::Result;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferAccess;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
+using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
using ::android::hardware::audio::common::utils::mkEnumConverter;
using ::android::hardware::hidl_vec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
@@ -338,5 +337,5 @@
return result;
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
similarity index 85%
rename from media/libaudiohal/2.0/EffectHalHidl.h
rename to media/libaudiohal/impl/EffectHalHidl.h
index 6ffdaf1..04f40d3 100644
--- a/media/libaudiohal/2.0/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -18,19 +18,22 @@
#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/IEffect.h>
+#include <android/hardware/audio/effect/4.0/IEffect.h>
#include <media/audiohal/EffectHalInterface.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <system/audio_effect.h>
-using ::android::hardware::audio::effect::V2_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V2_0::EffectConfig;
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using EffectResult = ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
namespace android {
+namespace CPP_VERSION {
class EffectHalHidl : public EffectHalInterface
{
@@ -68,8 +71,7 @@
private:
friend class EffectsFactoryHalHidl;
- typedef MessageQueue<
- hardware::audio::effect::V2_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+ typedef MessageQueue<EffectResult, hardware::kSynchronizedReadWrite> StatusMQ;
sp<IEffect> mEffect;
const uint64_t mEffectId;
@@ -79,7 +81,7 @@
std::unique_ptr<StatusMQ> mStatusMQ;
EventFlag* mEfGroup;
- static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+ static status_t analyzeResult(const EffectResult& result);
static void effectBufferConfigFromHal(
const buffer_config_t& halConfig, EffectBufferConfig* config);
static void effectBufferConfigToHal(
@@ -103,6 +105,7 @@
status_t setProcessBuffers();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
similarity index 93%
rename from media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index dfed784..b880433 100644
--- a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -18,21 +18,21 @@
//#define LOG_NDEBUG 0
#include <cutils/native_handle.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
+#include "EffectsFactoryHalHidl.h"
#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::Uuid;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::audio::effect::V4_0::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::Uuid;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::Return;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
mEffectsFactory = IEffectsFactory::getService();
@@ -42,9 +42,6 @@
}
}
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
status_t EffectsFactoryHalHidl::queryAllDescriptors() {
if (mEffectsFactory == 0) return NO_INIT;
Result retval = Result::NOT_INITIALIZED;
@@ -148,5 +145,5 @@
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/EffectsFactoryHalHidl.h
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 82b5481..c6fced7 100644
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,20 +18,25 @@
#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include "ConversionHelperHidl.h"
namespace android {
+namespace CPP_VERSION {
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffectsFactory;
using ::android::hardware::hidl_vec;
class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
{
public:
+ EffectsFactoryHalHidl();
+
// Returns the number of different effects in all loaded libraries.
virtual status_t queryNumberEffects(uint32_t *pNumEffects);
@@ -56,18 +61,17 @@
sp<EffectBufferHalInterface>* buffer) override;
private:
- friend class EffectsFactoryHalInterface;
-
sp<IEffectsFactory> mEffectsFactory;
hidl_vec<EffectDescriptor> mLastDescriptors;
- // Can not be constructed directly by clients.
- EffectsFactoryHalHidl();
- virtual ~EffectsFactoryHalHidl();
-
status_t queryAllDescriptors();
};
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
+ return new EffectsFactoryHalHidl();
+}
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
similarity index 91%
rename from media/libaudiohal/4.0/StreamHalHidl.cpp
rename to media/libaudiohal/impl/StreamHalHidl.cpp
index 1c2fdb0..bfa80e8 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "StreamHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
#include <android/hardware/audio/4.0/IStreamOutCallback.h>
#include <hwbinder/IPCThreadState.h>
#include <mediautils/SchedulingPolicyService.h>
@@ -27,30 +28,33 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioContentType;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::AudioUsage;
-using ::android::hardware::audio::common::V4_0::ThreadInfo;
-using ::android::hardware::audio::V4_0::AudioDrain;
-using ::android::hardware::audio::V4_0::IStreamOutCallback;
-using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::MmapBufferInfo;
-using ::android::hardware::audio::V4_0::MmapPosition;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
-using ::android::hardware::audio::V4_0::RecordTrackMetadata;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::TimeSpec;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
+using ::android::hardware::audio::common::CPP_VERSION::ThreadInfo;
+using ::android::hardware::audio::CPP_VERSION::AudioDrain;
+using ::android::hardware::audio::CPP_VERSION::IStreamOutCallback;
+using ::android::hardware::audio::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::CPP_VERSION::MmapBufferInfo;
+using ::android::hardware::audio::CPP_VERSION::MmapPosition;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CPP_VERSION::TimeSpec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V4_0::IStreamIn::ReadCommand;
+using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::common::CPP_VERSION::AudioContentType;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::AudioUsage;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+using ::android::hardware::audio::CPP_VERSION::PlaybackTrackMetadata;
+using ::android::hardware::audio::CPP_VERSION::RecordTrackMetadata;
+#endif
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
StreamHalHidl::StreamHalHidl(IStream *stream)
: ConversionHelperHidl("Stream"),
@@ -188,7 +192,17 @@
const native_handle *handle = hidlInfo.sharedMemory.handle();
if (handle->numFds > 0) {
info->shared_memory_fd = handle->data[0];
+#if MAJOR_VERSION == 4
+ info->flags = audio_mmap_buffer_flag(hidlInfo.flags);
+#endif
info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+ // A negative buffer size in frames was a hack in O and P to
+ // indicate that the buffer is shareable to applications.
+ if (info->buffer_size_frames < 0) {
+ info->buffer_size_frames *= -1;
+ info->flags = audio_mmap_buffer_flag(
+ info->flags | AUDIO_MMAP_APPLICATION_SHAREABLE);
+ }
info->burst_size_frames = hidlInfo.burstSizeFrames;
// info->shared_memory_address is not needed in HIDL context
info->shared_memory_address = NULL;
@@ -566,6 +580,12 @@
}
}
+#if MAJOR_VERSION == 2
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+ // Audio HAL V2.0 does not support propagating source metadata
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
/** Transform a standard collection to an HIDL vector. */
template <class Values, class ElementConverter>
static auto transformToHidlVec(const Values& values, ElementConverter converter) {
@@ -576,7 +596,7 @@
}
status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
- hardware::audio::V4_0::SourceMetadata halMetadata = {
+ hardware::audio::CPP_VERSION::SourceMetadata halMetadata = {
.tracks = transformToHidlVec(sourceMetadata.tracks,
[](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
return {
@@ -587,6 +607,7 @@
})};
return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
}
+#endif
void StreamOutHalHidl::onWriteReady() {
sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
@@ -782,7 +803,19 @@
}
}
+#if MAJOR_VERSION == 2
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ if (mStream == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+ // Audio HAL V2.0 does not support propagating sink metadata
+ return INVALID_OPERATION;
+}
+
+#elif MAJOR_VERSION == 4
status_t StreamInHalHidl::getActiveMicrophones(
std::vector<media::MicrophoneInfo> *microphonesInfo) {
if (!mStream) return NO_INIT;
@@ -802,7 +835,7 @@
}
status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
- hardware::audio::V4_0::SinkMetadata halMetadata = {
+ hardware::audio::CPP_VERSION::SinkMetadata halMetadata = {
.tracks = transformToHidlVec(sinkMetadata.tracks,
[](const record_track_metadata& metadata) -> RecordTrackMetadata {
return {
@@ -812,6 +845,7 @@
})};
return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/StreamHalHidl.h
rename to media/libaudiohal/impl/StreamHalHidl.h
index ebad8ae..95ec7f1 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -20,8 +20,11 @@
#include <atomic>
#include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/4.0/IStream.h>
#include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/4.0/IStreamIn.h>
#include <android/hardware/audio/2.0/IStreamOut.h>
+#include <android/hardware/audio/4.0/IStreamOut.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <media/audiohal/StreamHalInterface.h>
@@ -29,18 +32,19 @@
#include "ConversionHelperHidl.h"
#include "StreamPowerLog.h"
-using ::android::hardware::audio::V2_0::IStream;
-using ::android::hardware::audio::V2_0::IStreamIn;
-using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::audio::CPP_VERSION::IStream;
+using ::android::hardware::audio::CPP_VERSION::IStreamIn;
+using ::android::hardware::audio::CPP_VERSION::IStreamOut;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+using ReadParameters = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteStatus;
namespace android {
+namespace CPP_VERSION {
class DeviceHalHidl;
@@ -243,6 +247,7 @@
status_t prepareForReading(size_t bufferSize);
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/StreamHalLocal.cpp
rename to media/libaudiohal/impl/StreamHalLocal.cpp
index e9d96bf..b134f57 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -25,7 +25,7 @@
#include "VersionUtils.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
: mDevice(device),
@@ -341,6 +341,12 @@
return mStream->get_mmap_position(mStream, position);
}
+#if MAJOR_VERSION == 2
+status_t StreamInHalLocal::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -352,6 +358,7 @@
}
return status;
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
similarity index 99%
rename from media/libaudiohal/2.0/StreamHalLocal.h
rename to media/libaudiohal/impl/StreamHalLocal.h
index cda8d0c..cea4229 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -21,6 +21,7 @@
#include "StreamPowerLog.h"
namespace android {
+namespace CPP_VERSION {
class DeviceHalLocal;
@@ -214,6 +215,7 @@
virtual ~StreamInHalLocal();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/2.0/StreamPowerLog.h b/media/libaudiohal/impl/StreamPowerLog.h
similarity index 98%
rename from media/libaudiohal/2.0/StreamPowerLog.h
rename to media/libaudiohal/impl/StreamPowerLog.h
index a78b1aa..5fd3912 100644
--- a/media/libaudiohal/2.0/StreamPowerLog.h
+++ b/media/libaudiohal/impl/StreamPowerLog.h
@@ -23,6 +23,7 @@
#include <system/audio.h>
namespace android {
+namespace CPP_VERSION {
class StreamPowerLog {
public:
@@ -97,6 +98,7 @@
size_t mFrameSize;
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_H
diff --git a/media/libaudiohal/impl/VersionMacro.h b/media/libaudiohal/impl/VersionMacro.h
new file mode 100644
index 0000000..98e9c07
--- /dev/null
+++ b/media/libaudiohal/impl/VersionMacro.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_VERSION_MACRO_H
+#define ANDROID_HARDWARE_VERSION_MACRO_H
+
+#if !defined(MAJOR_VERSION) || !defined(MINOR_VERSION)
+#error "MAJOR_VERSION and MINOR_VERSION must be defined"
+#endif
+
+#define CONCAT_3(a,b,c) a##b##c
+#define EXPAND_CONCAT_3(a,b,c) CONCAT_3(a,b,c)
+/** The directory name of the version: <major>.<minor> */
+#define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION,.,MINOR_VERSION)
+
+#define CONCAT_4(a,b,c,d) a##b##c##d
+#define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
+/** The C++ namespace of the version: V<major>_<minor> */
+#define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)
+
+#endif // ANDROID_HARDWARE_VERSION_MACRO_H
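As a sanity check, here is a minimal sketch (a hypothetical file, not part of the patch) of what these macros expand to when a source file is built with the libaudiohal@4.0 flags above, i.e. -DMAJOR_VERSION=4 -DMINOR_VERSION=0 -include VersionMacro.h:

    // CPP_VERSION expands to the token V4_0, FILE_VERSION to the token 4.0.
    namespace android {
    namespace CPP_VERSION {            // compiles as: namespace V4_0
    class DeviceHalHidl;               // this forward declaration lands in android::V4_0
    }  // namespace CPP_VERSION
    }  // namespace android
    static_assert(MAJOR_VERSION == 4 && MINOR_VERSION == 0,
                  "this sketch assumes the 4.0 build variant");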
diff --git a/media/libaudiohal/4.0/VersionUtils.h b/media/libaudiohal/impl/VersionUtils.h
similarity index 61%
rename from media/libaudiohal/4.0/VersionUtils.h
rename to media/libaudiohal/impl/VersionUtils.h
index 1246c2e..5004895 100644
--- a/media/libaudiohal/4.0/VersionUtils.h
+++ b/media/libaudiohal/impl/VersionUtils.h
@@ -14,22 +14,36 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_VERSION_UTILS_4_0_H
-#define ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#ifndef ANDROID_HARDWARE_VERSION_UTILS_H
+#define ANDROID_HARDWARE_VERSION_UTILS_H
+#include <android/hardware/audio/2.0/types.h>
#include <android/hardware/audio/4.0/types.h>
#include <hidl/HidlSupport.h>
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
using ::android::hardware::Return;
using ::android::hardware::hidl_vec;
using ::android::hardware::hidl_string;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
namespace utils {
+#if MAJOR_VERSION == 2
+template <class T, class Callback>
+Return<void> getParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+ hidl_vec<hidl_string> keys, Callback callback) {
+ return object->getParameters(keys, callback);
+}
+
+template <class T>
+Return<Result> setParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+ hidl_vec<ParameterValue> keys) {
+ return object->setParameters(keys);
+}
+#elif MAJOR_VERSION == 4
template <class T, class Callback>
Return<void> getParameters(T& object, hidl_vec<ParameterValue> context,
hidl_vec<hidl_string> keys, Callback callback) {
@@ -41,9 +55,10 @@
hidl_vec<ParameterValue> keys) {
return object->setParameters(context, keys);
}
+#endif
} // namespace utils
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
-#endif // ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#endif // ANDROID_HARDWARE_VERSION_UTILS_H
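A minimal call-site sketch (hypothetical) of the helpers above: stream code can always pass a context vector, and the 2.0 overload simply drops it while the 4.0 overload forwards it to the HAL.

    // "stream" stands for a sp<IStreamOut> (or any object exposing get/setParameters).
    hidl_vec<ParameterValue> params;
    params.resize(1);
    params[0].key = "exampleKey";      // hypothetical parameter name
    params[0].value = "exampleValue";
    Return<Result> ret = utils::setParameters(stream, {} /* context */, params);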
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
new file mode 100644
index 0000000..fa0effc
--- /dev/null
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+
+/** @file Library entry points to create the HAL factories. */
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+namespace V2_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V2_0
+
+namespace V4_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V4_0
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f6f817a..f3ea826 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -875,7 +875,7 @@
t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
t->mMixerInFormat, t->mMixerFormat);
ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix + resample", i);
+ "Track %d needs downmix + resample", name);
} else {
if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
t->hook = Track::getTrackHook(
@@ -890,7 +890,7 @@
t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
t->mMixerInFormat, t->mMixerFormat);
ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix", i);
+ "Track %d needs downmix", name);
}
}
}
diff --git a/media/libaudioprocessing/include/media/AudioResamplerPublic.h b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
index 055f724..50ca33d 100644
--- a/media/libaudioprocessing/include/media/AudioResamplerPublic.h
+++ b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
@@ -104,8 +104,8 @@
const AudioPlaybackRate &pr2) {
return fabs(pr1.mSpeed - pr2.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
fabs(pr1.mPitch - pr2.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA &&
- pr2.mStretchMode == pr2.mStretchMode &&
- pr2.mFallbackMode == pr2.mFallbackMode;
+ pr1.mStretchMode == pr2.mStretchMode &&
+ pr1.mFallbackMode == pr2.mFallbackMode;
}
static inline bool isAudioPlaybackRateValid(const AudioPlaybackRate &playbackRate) {
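The fix above replaces a self-comparison that was always true, so two rates differing only in stretch or fallback mode used to compare as equal. A brief sketch (hypothetical values, enum names as declared elsewhere in this header) of the behaviour change:

    AudioPlaybackRate a, b;
    a.mSpeed = b.mSpeed = 1.0f;
    a.mPitch = b.mPitch = 1.0f;
    a.mStretchMode = b.mStretchMode = AUDIO_TIMESTRETCH_STRETCH_DEFAULT;
    a.mFallbackMode = AUDIO_TIMESTRETCH_FALLBACK_MUTE;
    b.mFallbackMode = AUDIO_TIMESTRETCH_FALLBACK_FAIL;
    // Before this change isAudioPlaybackRateEqual(a, b) returned true;
    // with pr1 compared against pr2 it now correctly returns false.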
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index 9d29cf1..d61efd3 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -30,6 +30,26 @@
#include <audio_effects/effect_loudnessenhancer.h>
#include "dsp/core/dynamic_range_compression.h"
+// BUILD_FLOAT selects a float effect implementation instead of the legacy int16_t one.
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+static inline int16_t clamp16(int32_t sample)
+{
+ if ((sample>>15) ^ (sample>>31))
+ sample = 0x7FFF ^ (sample>>31);
+ return sample;
+}
+
+#endif // BUILD_FLOAT
+
extern "C" {
// effect_handle_t interface implementation for LE effect
@@ -80,13 +100,6 @@
}
}
-static inline int16_t clamp16(int32_t sample)
-{
- if ((sample>>15) ^ (sample>>31))
- sample = 0x7FFF ^ (sample>>31);
- return sample;
-}
-
//----------------------------------------------------------------------------
// LE_setConfig()
//----------------------------------------------------------------------------
@@ -111,7 +124,7 @@
if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
- if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+ if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
pContext->mConfig = *pConfig;
@@ -159,7 +172,7 @@
pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->mConfig.inputCfg.format = kProcessFormat;
pContext->mConfig.inputCfg.samplingRate = 44100;
pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -167,7 +180,7 @@
pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->mConfig.outputCfg.format = kProcessFormat;
pContext->mConfig.outputCfg.samplingRate = 44100;
pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -284,18 +297,41 @@
//ALOGV("LE about to process %d samples", inBuffer->frameCount);
uint16_t inIdx;
+#ifdef BUILD_FLOAT
+ constexpr float scale = 1 << 15; // power of 2 is lossless conversion to int16_t range
+ constexpr float inverseScale = 1.f / scale;
+ const float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f) * scale;
+#else
float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f);
+#endif
float leftSample, rightSample;
for (inIdx = 0 ; inIdx < inBuffer->frameCount ; inIdx++) {
// makeup gain is applied on the input of the compressor
+#ifdef BUILD_FLOAT
+ leftSample = inputAmp * inBuffer->f32[2*inIdx];
+ rightSample = inputAmp * inBuffer->f32[2*inIdx +1];
+ pContext->mCompressor->Compress(&leftSample, &rightSample);
+ inBuffer->f32[2*inIdx] = leftSample * inverseScale;
+ inBuffer->f32[2*inIdx +1] = rightSample * inverseScale;
+#else
leftSample = inputAmp * (float)inBuffer->s16[2*inIdx];
rightSample = inputAmp * (float)inBuffer->s16[2*inIdx +1];
pContext->mCompressor->Compress(&leftSample, &rightSample);
inBuffer->s16[2*inIdx] = (int16_t) leftSample;
inBuffer->s16[2*inIdx +1] = (int16_t) rightSample;
+#endif // BUILD_FLOAT
}
if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+ if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
+ outBuffer->f32[i] += inBuffer->f32[i];
+ }
+ } else {
+ memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(float));
+ }
+#else
if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -303,6 +339,7 @@
} else {
memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
}
+#endif // BUILD_FLOAT
}
if (pContext->mState != LOUDNESS_ENHANCER_STATE_ACTIVE) {
return -ENODATA;
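The float path above rescales samples so that the existing compressor, which was tuned for int16-range magnitudes, keeps seeing the same numeric range. A minimal standalone sketch of that round trip (the Compressor type is a stand-in for the real compressor class and processStereoFrame is an invented helper, not part of this change):

#include <cmath>

struct Compressor {                              // stand-in for the real compressor class
    void Compress(float * /*l*/, float * /*r*/) { /* compression happens here in the real effect */ }
};

void processStereoFrame(Compressor &comp, float &left, float &right, float targetGainmB) {
    constexpr float scale = 1 << 15;             // power of 2: lossless scaling
    constexpr float inverseScale = 1.f / scale;
    // mB -> dB is /100, dB -> linear is 10^(dB/20), hence 10^(mB/2000)
    const float inputAmp = std::pow(10.f, targetGainmB / 2000.f) * scale;

    left  *= inputAmp;                           // makeup gain plus scale into int16 range
    right *= inputAmp;
    comp.Compress(&left, &right);                // compressor operates on int16-range values
    left  *= inverseScale;                       // back to the [-1, 1] float convention
    right *= inverseScale;
}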
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 70409de..3534149 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -19,7 +19,8 @@
LOCAL_MODULE:= libvisualizer
LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-effects)
+ $(call include-path-for, audio-effects) \
+ $(call include-path-for, audio-utils)
LOCAL_HEADER_LIBRARIES += libhardware_headers
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 807f24d..e2ccfb7 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -24,11 +24,25 @@
#include <string.h>
#include <time.h>
+#include <algorithm> // max
#include <new>
#include <log/log.h>
#include <audio_effects/effect_visualizer.h>
+#include <audio_utils/primitives.h>
+
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+#endif // BUILD_FLOAT
extern "C" {
@@ -146,7 +160,7 @@
if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
- if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+ if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
pContext->mConfig = *pConfig;
@@ -192,7 +206,7 @@
{
pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->mConfig.inputCfg.format = kProcessFormat;
pContext->mConfig.inputCfg.samplingRate = 44100;
pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -200,7 +214,7 @@
pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->mConfig.outputCfg.format = kProcessFormat;
pContext->mConfig.outputCfg.samplingRate = 44100;
pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -301,15 +315,8 @@
//--- Effect Control Interface Implementation
//
-static inline int16_t clamp16(int32_t sample)
-{
- if ((sample>>15) ^ (sample>>31))
- sample = 0x7FFF ^ (sample>>31);
- return sample;
-}
-
int Visualizer_process(
- effect_handle_t self,audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
+ effect_handle_t self, audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
{
VisualizerContext * pContext = (VisualizerContext *)self;
@@ -324,20 +331,28 @@
return -EINVAL;
}
+ const size_t sampleLen = inBuffer->frameCount * pContext->mChannelCount;
+
// perform measurements if needed
if (pContext->mMeasurementMode & MEASUREMENT_MODE_PEAK_RMS) {
// find the peak and RMS squared for the new buffer
- uint32_t inIdx;
- int16_t maxSample = 0;
float rmsSqAcc = 0;
- for (inIdx = 0 ; inIdx < inBuffer->frameCount * pContext->mChannelCount ; inIdx++) {
- if (inBuffer->s16[inIdx] > maxSample) {
- maxSample = inBuffer->s16[inIdx];
- } else if (-inBuffer->s16[inIdx] > maxSample) {
- maxSample = -inBuffer->s16[inIdx];
- }
- rmsSqAcc += (inBuffer->s16[inIdx] * inBuffer->s16[inIdx]);
+
+#ifdef BUILD_FLOAT
+ float maxSample = 0.f;
+ for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+ maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+ rmsSqAcc += inBuffer->f32[inIdx] * inBuffer->f32[inIdx];
}
+ maxSample *= 1 << 15; // scale to the int16_t domain, with 1 << 15 representing full scale.
+ rmsSqAcc *= 1 << 30; // scale the squared accumulator by (1 << 15)^2.
+#else
+ int maxSample = 0;
+ for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+ maxSample = std::max(maxSample, std::abs(int32_t(inBuffer->s16[inIdx])));
+ rmsSqAcc += inBuffer->s16[inIdx] * inBuffer->s16[inIdx];
+ }
+#endif
// store the measurement
pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mPeakU16 = (uint16_t)maxSample;
pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mRmsSquared =
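The two scale factors applied after the float loop exist only to keep the stored measurements in their legacy int16-based units. A sketch of the same computation as a free function (the Measurement struct and field names here are invented for illustration):

#include <cmath>
#include <cstddef>
#include <cstdint>

struct Measurement { uint16_t peakU16; float rmsSquaredAcc; };

Measurement measureFloat(const float *samples, size_t count) {
    float maxSample = 0.f;
    float rmsSqAcc = 0.f;
    for (size_t i = 0; i < count; ++i) {
        maxSample = std::fmax(maxSample, std::fabs(samples[i]));
        rmsSqAcc += samples[i] * samples[i];
    }
    // 1.0f in the float domain corresponds to 1 << 15 in the int16 domain, so the
    // peak scales by 2^15 and the squared accumulator by (2^15)^2 = 2^30.
    Measurement m;
    m.peakU16 = (uint16_t)(maxSample * (1 << 15));
    m.rmsSquaredAcc = rmsSqAcc * (1 << 30);
    return m;
}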
@@ -348,32 +363,59 @@
}
}
- // all code below assumes stereo 16 bit PCM output and input
+#ifdef BUILD_FLOAT
+ float fscale; // multiplicative scale
+#else
int32_t shift;
+#endif // BUILD_FLOAT
if (pContext->mScalingMode == VISUALIZER_SCALING_MODE_NORMALIZED) {
// derive capture scaling factor from peak value in current buffer
// this gives more interesting captures for display.
- shift = 32;
- int len = inBuffer->frameCount * 2;
- for (int i = 0; i < len; i++) {
+
+#ifdef BUILD_FLOAT
+ float maxSample = 0.f;
+ for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+ maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+ }
+ if (maxSample > 0.f) {
+ constexpr float halfish = 127.f / 256.f;
+ fscale = halfish / maxSample;
+ int exp; // unused
+ const float significand = frexp(fscale, &exp);
+ if (significand == 0.5f) {
+ fscale *= 255.f / 256.f; // avoid returning unaltered PCM signal
+ }
+ } else {
+ // scale doesn't matter, the values are all 0.
+ fscale = 1.f;
+ }
+#else
+ int32_t orAccum = 0;
+ for (size_t i = 0; i < sampleLen; ++i) {
int32_t smp = inBuffer->s16[i];
if (smp < 0) smp = -smp - 1; // take care to keep the max negative in range
- int32_t clz = __builtin_clz(smp);
- if (shift > clz) shift = clz;
+ orAccum |= smp;
}
+
// A maximum amplitude signal will have 17 leading zeros, which we want to
// translate to a shift of 8 (for converting 16 bit to 8 bit)
- shift = 25 - shift;
+ shift = 25 - __builtin_clz(orAccum);
+
// Never scale by less than 8 to avoid returning unaltered PCM signal.
if (shift < 3) {
shift = 3;
}
// add one to combine the division by 2 needed after summing left and right channels below
shift++;
+#endif // BUILD_FLOAT
} else {
assert(pContext->mScalingMode == VISUALIZER_SCALING_MODE_AS_PLAYED);
+#ifdef BUILD_FLOAT
+ fscale = 0.5f; // default divide by 2 to account for sum of L + R.
+#else
shift = 9;
+#endif // BUILD_FLOAT
}
uint32_t captIdx;
@@ -386,9 +428,13 @@
// wrap around
captIdx = 0;
}
- int32_t smp = inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1];
- smp = smp >> shift;
+#ifdef BUILD_FLOAT
+ const float smp = (inBuffer->f32[2 * inIdx] + inBuffer->f32[2 * inIdx + 1]) * fscale;
+ buf[captIdx] = clamp8_from_float(smp);
+#else
+ const int32_t smp = (inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1]) >> shift;
buf[captIdx] = ((uint8_t)smp)^0x80;
+#endif // BUILD_FLOAT
}
// XXX the following two should really be atomic, though it probably doesn't
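In the int16 branch above, OR-ing the absolute sample values preserves the highest set bit seen anywhere in the buffer, so a single __builtin_clz on the accumulator yields the same shift as the old per-sample minimum-clz loop. A standalone sketch of that trick (the guard against an all-zero buffer is added here for clarity and is not literally in the effect):

#include <cstddef>
#include <cstdint>

int32_t captureShift(const int16_t *samples, size_t count) {
    int32_t orAccum = 0;
    for (size_t i = 0; i < count; ++i) {
        int32_t smp = samples[i];
        if (smp < 0) smp = -smp - 1;       // keep the most negative value in range
        orAccum |= smp;
    }
    // A full-scale sample (0x7FFF held in a 32-bit int) has 17 leading zeros,
    // which maps to a shift of 25 - 17 = 8, i.e. 16-bit down to 8-bit.
    int32_t shift = 25 - __builtin_clz(orAccum | 1);  // | 1 avoids clz(0) on silence
    if (shift < 3) shift = 3;              // never scale by less than 8
    return shift + 1;                      // +1 folds in the later divide-by-2 for L + R
}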
@@ -400,6 +446,15 @@
}
if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+ if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < sampleLen; ++i) {
+ outBuffer->f32[i] += inBuffer->f32[i];
+ }
+ } else {
+ memcpy(outBuffer->raw, inBuffer->raw, sampleLen * sizeof(float));
+ }
+#else
if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -407,6 +462,7 @@
} else {
memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
}
+#endif // BUILD_FLOAT
}
if (pContext->mState != VISUALIZER_STATE_ACTIVE) {
return -ENODATA;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 1b3a1be..e6d6b3e 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -20,7 +20,7 @@
vndk: {
enabled: true,
},
- srcs: ["AudioParameter.cpp", "TypeConverter.cpp", "TimeCheck.cpp"],
+ srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
@@ -32,6 +32,9 @@
"libaudioclient_headers",
"libaudio_system_headers",
],
+ export_header_lib_headers: [
+ "libmedia_headers",
+ ],
clang: true,
}
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 034f7c2..1c95e27 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -36,6 +36,8 @@
const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
const char * const AudioParameter::keyPresentationId = AUDIO_PARAMETER_STREAM_PRESENTATION_ID;
const char * const AudioParameter::keyProgramId = AUDIO_PARAMETER_STREAM_PROGRAM_ID;
+const char * const AudioParameter::keyAudioLanguagePreferred =
+ AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED;
const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index a3db754..514c795 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -132,6 +132,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_AV_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_DIRECT),
TERMINATOR
};
diff --git a/media/libmedia/include/media/PatchBuilder.h b/media/libmedia/include/media/PatchBuilder.h
new file mode 100644
index 0000000..f2722a6
--- /dev/null
+++ b/media/libmedia/include/media/PatchBuilder.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PATCH_BUILDER_H
+#define ANDROID_PATCH_BUILDER_H
+
+#include <functional>
+#include <utility>
+
+#include <system/audio.h>
+#include <utils/StrongPointer.h>
+
+// This is a header-only utility.
+
+namespace android {
+
+class PatchBuilder {
+ public:
+ using mix_usecase_t = decltype(audio_port_config_mix_ext::usecase);
+
+ PatchBuilder() = default;
+
+ // All existing methods operating on audio patches take a pointer to const.
+ // It's OK to construct a temporary PatchBuilder while preparing a parameter
+ // to such a function because the temporary Builder stays alive until execution
+ // reaches the semicolon that ends the full call expression.
+ const struct audio_patch* patch() const { return &mPatch; }
+
+ template<typename T, typename... S>
+ PatchBuilder& addSink(T&& t, S&&... s) {
+ sinks().add(std::forward<T>(t), std::forward<S>(s)...);
+ return *this;
+ }
+ // Explicit type of the second parameter allows clients to provide the struct inline.
+ template<typename T>
+ PatchBuilder& addSink(T&& t, const mix_usecase_t& update) {
+ sinks().add(std::forward<T>(t), update);
+ return *this;
+ }
+ template<typename T, typename... S>
+ PatchBuilder& addSource(T&& t, S&&... s) {
+ sources().add(std::forward<T>(t), std::forward<S>(s)...);
+ return *this;
+ }
+ // Explicit type of the second parameter allows clients to provide the struct inline.
+ template<typename T>
+ PatchBuilder& addSource(T&& t, const mix_usecase_t& update) {
+ sources().add(std::forward<T>(t), update);
+ return *this;
+ }
+
+ private:
+ struct PortCfgs {
+ PortCfgs(unsigned int *countPtr, struct audio_port_config *portCfgs)
+ : mCountPtr(countPtr), mPortCfgs(portCfgs) {}
+ audio_port_config& add(const audio_port_config& portCfg) {
+ return *advance() = portCfg;
+ }
+ template<typename T>
+ audio_port_config& add(const sp<T>& entity) {
+ audio_port_config* added = advance();
+ entity->toAudioPortConfig(added);
+ return *added;
+ }
+ template<typename T>
+ void add(const sp<T>& entity, const mix_usecase_t& usecaseUpdate) {
+ add(entity).ext.mix.usecase = usecaseUpdate;
+ }
+ template<typename T>
+ void add(const sp<T>& entity,
+ std::function<mix_usecase_t(const mix_usecase_t&)> usecaseUpdater) {
+ mix_usecase_t* usecase = &add(entity).ext.mix.usecase;
+ *usecase = usecaseUpdater(*usecase);
+ }
+ struct audio_port_config* advance() {
+ return &mPortCfgs[(*mCountPtr)++];
+ }
+ unsigned int *mCountPtr;
+ struct audio_port_config *mPortCfgs;
+ };
+
+ PortCfgs sinks() { return PortCfgs(&mPatch.num_sinks, mPatch.sinks); }
+ PortCfgs sources() { return PortCfgs(&mPatch.num_sources, mPatch.sources); }
+
+ struct audio_patch mPatch = {};
+};
+
+} // namespace android
+
+#endif // ANDROID_PATCH_BUILDER_H
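A minimal usage sketch for the builder above, with invented stand-in types (FakePort, applyPatch); the only real API used is PatchBuilder itself. It shows why the comment about temporaries matters: the builder outlives the full expression in which patch() is consumed.

#include <media/PatchBuilder.h>
#include <system/audio.h>
#include <utils/RefBase.h>

namespace android {

// Invented stand-in for any entity that can fill in its own port config,
// which is what PatchBuilder's templated add() expects.
struct FakePort : public RefBase {
    audio_port_config mConfig = {};
    void toAudioPortConfig(audio_port_config *dst) const { *dst = mConfig; }
};

// Invented consumer taking the usual const audio_patch* parameter.
static void applyPatch(const struct audio_patch * /*patch*/) {}

static void buildAndApply(const sp<FakePort>& source, const sp<FakePort>& sink) {
    // The temporary PatchBuilder stays alive until the end of the full
    // expression, so its patch() pointer is valid for the duration of the call.
    applyPatch(PatchBuilder().addSource(source).addSink(sink).patch());
}

}  // namespace android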
diff --git a/media/libmedia/include/media/SingleStateQueue.h b/media/libmedia/include/media/SingleStateQueue.h
index d423962..c2761cb 100644
--- a/media/libmedia/include/media/SingleStateQueue.h
+++ b/media/libmedia/include/media/SingleStateQueue.h
@@ -99,6 +99,13 @@
return mShared->mAck - sequence >= 0;
}
+ // returns the last value written (or the contents of the shared buffer after initialization
+ // if no value was written).
+ T last() const
+ { // assume no sequence check required - we are the writer.
+ return mShared->mValue;
+ }
+
private:
int32_t mSequence;
Shared * const mShared;
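A minimal sketch of how a writer might use the new last() accessor (the payload type and helper are invented; only the Mutator's push() and last() come from the class): the writer re-reads its own most recently pushed value, updates one field, and pushes again without keeping a shadow copy.

#include <media/SingleStateQueue.h>

namespace android {

struct Position { int64_t frames; };    // invented, trivially copyable payload

void advance(SingleStateQueue<Position>::Mutator &mutator, int64_t delta) {
    Position p = mutator.last();        // last value written by this writer
    p.frames += delta;
    mutator.push(p);                    // publish the updated value
}

}  // namespace android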
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index 39f8d6e..d197b3f 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -39,7 +39,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(false),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
}
@@ -51,7 +51,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(true),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
if (size < kSharedMemThreshold
|| std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
@@ -84,7 +84,7 @@
mRangeLength(mSize),
mBuffer(buffer),
mOwnsData(false),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
}
@@ -96,7 +96,7 @@
return;
}
- int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
+ int prevCount = mRefCount.fetch_sub(1);
if (prevCount == 1) {
if (mObserver == NULL) {
delete this;
@@ -110,13 +110,13 @@
void MediaBuffer::claim() {
CHECK(mObserver != NULL);
- CHECK_EQ(mRefCount, 1);
+ CHECK_EQ(mRefCount.load(std::memory_order_relaxed), 1);
- mRefCount = 0;
+ mRefCount.store(0, std::memory_order_relaxed);
}
void MediaBuffer::add_ref() {
- (void) __sync_fetch_and_add(&mRefCount, 1);
+ (void) mRefCount.fetch_add(1);
}
void *MediaBuffer::data() const {
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index f944d51..5a25965 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -86,12 +86,14 @@
virtual MediaBufferBase *clone();
// sum of localRefcount() and remoteRefcount()
+ // Result should be treated as approximate unless the result precludes concurrent accesses.
virtual int refcount() const {
return localRefcount() + remoteRefcount();
}
+ // Result should be treated as approximate unless the result precludes concurrent accesses.
virtual int localRefcount() const {
- return mRefCount;
+ return mRefCount.load(std::memory_order_relaxed);
}
virtual int remoteRefcount() const {
@@ -146,7 +148,7 @@
void claim();
MediaBufferObserver *mObserver;
- int mRefCount;
+ std::atomic<int> mRefCount;
void *mData;
size_t mSize, mRangeOffset, mRangeLength;
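The refcount change above swaps the GCC __sync builtins for std::atomic<int>: the read-modify-write operations keep the default sequentially consistent ordering, while pure observers use relaxed loads because their result is documented as approximate. A reduced sketch of the same pattern, separate from MediaBuffer itself:

#include <atomic>

class RefCounted {
public:
    void add_ref() { (void)mRefCount.fetch_add(1); }
    void release() {
        const int prevCount = mRefCount.fetch_sub(1);
        if (prevCount == 1) {            // we held the last reference
            delete this;
        }
    }
    int refcount() const {               // approximate under concurrent access
        return mRefCount.load(std::memory_order_relaxed);
    }
private:
    ~RefCounted() = default;
    std::atomic<int> mRefCount{1};
};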
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
srcs: [
"JAudioTrack.cpp",
+ "JavaVMHelper.cpp",
"MediaPlayer2AudioOutput.cpp",
"mediaplayer2.cpp",
],
@@ -49,6 +50,10 @@
"media_plugin_headers",
],
+ include_dirs: [
+ "frameworks/base/core/jni",
+ ],
+
static_libs: [
"libmedia_helper",
"libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
#include "mediaplayer2/JAudioTrack.h"
#include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
namespace android {
@@ -39,7 +39,7 @@
const audio_attributes_t* pAttributes, // AudioAttributes
float maxRequiredSpeed) { // bufferSizeInBytes
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
@@ -116,19 +116,19 @@
}
JAudioTrack::~JAudioTrack() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
env->DeleteGlobalRef(mAudioTrackCls);
}
size_t JAudioTrack::frameCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
}
size_t JAudioTrack::channelCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
}
@@ -143,7 +143,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
mAudioTrackCls, "getPlaybackHeadPosition", "()I");
*position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
}
bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
// TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
// Should we do the same thing?
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
}
const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackParams = env->GetMethodID(
mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
return media::VolumeShaper::Status(BAD_VALUE);
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
"(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
}
status_t JAudioTrack::setAuxEffectSendLevel(float level) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
}
status_t JAudioTrack::attachAuxEffect(int effectId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
return javaToNativeStatus(result);
}
status_t JAudioTrack::setVolume(float left, float right) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// TODO: Java setStereoVolume is deprecated. Do we really need this method?
jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
}
status_t JAudioTrack::setVolume(float volume) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
return javaToNativeStatus(result);
}
status_t JAudioTrack::start() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
// TODO: Should we catch the Java IllegalStateException from play()?
env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jbyteArray jAudioData = env->NewByteArray(size);
env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
@@ -353,7 +353,7 @@
}
void JAudioTrack::stop() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
env->CallVoidMethod(mAudioTrackObj, jStop);
// TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
}
void JAudioTrack::flush() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
env->CallVoidMethod(mAudioTrackObj, jFlush);
}
void JAudioTrack::pause() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
env->CallVoidMethod(mAudioTrackObj, jPause);
// TODO: Should we catch IllegalStateException?
}
bool JAudioTrack::isPlaying() const {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
@@ -393,7 +393,7 @@
}
uint32_t JAudioTrack::getSampleRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
}
@@ -403,7 +403,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
}
audio_format_t JAudioTrack::format() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
}
audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
"()Landroid/media/AudioDeviceInfo;");
jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
}
audio_session_t JAudioTrack::getAudioSessionId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
return (audio_session_t) sessionId;
}
status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
return NULL;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// Referenced "android_media_VolumeShaper.h".
jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
jobject JAudioTrack::createVolumeShaperOperationObj(
const sp<media::VolumeShaper::Operation>& operation) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
}
jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
}
jobject JAudioTrack::createCallbackExecutor() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
"newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+ JNIEnv *env;
+ JavaVM *vm = sJavaVM.load();
+ CHECK(vm != NULL);
+
+ if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+ return NULL;
+ }
+
+ return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+ sJavaVM.store(vm);
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+ static JNIEnv *getJNIEnv();
+ static void setJavaVM(JavaVM *vm);
+
+private:
+ // Once a valid JavaVM has been set, it should never be reset or changed.
+ // However, as it may be accessed from multiple threads, access needs to be
+ // synchronized.
+ static std::atomic<JavaVM *> sJavaVM;
+};
+
+} // namespace android
+
+#endif // JAVA_VM_HELPER_H_
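A hypothetical wiring sketch for the new helper: some process entry point (here a JNI_OnLoad-style hook, which is an assumption for illustration, not something added by this change) records the VM once, after which mediaplayer2 code can obtain a JNIEnv without depending on libandroid_runtime.

#include <mediaplayer2/JavaVMHelper.h>

jint JNI_OnLoad(JavaVM *vm, void * /* reserved */) {
    android::JavaVMHelper::setJavaVM(vm);        // must run before any getJNIEnv() call
    return JNI_VERSION_1_4;
}

static void callIntoJava() {
    JNIEnv *env = android::JavaVMHelper::getJNIEnv();
    if (env == NULL) {
        return;                                  // current thread is not attached to the VM
    }
    // ... FindClass / GetMethodID / Call*Method, as in JAudioTrack.cpp ...
}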
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 3905b55..211a5c0 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -96,17 +96,20 @@
enum media2_info_type {
// 0xx
MEDIA2_INFO_UNKNOWN = 1,
- // The player was started because it was used as the next player for another
- // player, which just completed playback
- MEDIA2_INFO_STARTED_AS_NEXT = 2,
+ // The player just started the playback of this data source.
+ MEDIA2_INFO_DATA_SOURCE_START = 2,
// The player just pushed the very first video frame for rendering
MEDIA2_INFO_VIDEO_RENDERING_START = 3,
// The player just pushed the very first audio frame for rendering
MEDIA2_INFO_AUDIO_RENDERING_START = 4,
// The player just completed the playback of this data source
- MEDIA2_INFO_PLAYBACK_COMPLETE = 5,
- // The player just completed the playback of the full play list
- MEDIA2_INFO_PLAYLIST_END = 6,
+ MEDIA2_INFO_DATA_SOURCE_END = 5,
+ // The player just completed the playback of all data sources.
+ // This is not visible in native code; the entry is kept for completeness.
+ MEDIA2_INFO_DATA_SOURCE_LIST_END = 6,
+ // The player just completed an iteration of playback loop. This event is sent only when
+ // looping is enabled.
+ MEDIA2_INFO_DATA_SOURCE_REPEAT = 7,
//1xx
// The player just prepared a data source.
@@ -150,11 +153,11 @@
// Do not change these values without updating their counterparts in MediaPlayer2.java
enum mediaplayer2_states {
- MEDIAPLAYER2_STATE_IDLE = 1,
- MEDIAPLAYER2_STATE_PREPARED = 2,
- MEDIAPLAYER2_STATE_PLAYING = 3,
- MEDIAPLAYER2_STATE_PAUSED = 4,
- MEDIAPLAYER2_STATE_ERROR = 5,
+ MEDIAPLAYER2_STATE_IDLE = 1001,
+ MEDIAPLAYER2_STATE_PREPARED = 1002,
+ MEDIAPLAYER2_STATE_PLAYING = 1003,
+ MEDIAPLAYER2_STATE_PAUSED = 1004,
+ MEDIAPLAYER2_STATE_ERROR = 1005,
};
enum media_player2_internal_states {
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index d586192..3af212e 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -68,7 +68,7 @@
status_t stop();
status_t pause();
bool isPlaying();
- mediaplayer2_states getMediaPlayer2State();
+ mediaplayer2_states getState();
status_t setPlaybackSettings(const AudioPlaybackRate& rate);
status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index e5567dc..4fb47b8 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -860,7 +860,7 @@
return false;
}
-mediaplayer2_states MediaPlayer2::getMediaPlayer2State() {
+mediaplayer2_states MediaPlayer2::getState() {
Mutex::Autolock _l(mLock);
if (mCurrentState & MEDIA_PLAYER2_STATE_ERROR) {
return MEDIAPLAYER2_STATE_ERROR;
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
index 196b103..a6d88a2 100644
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
@@ -34,7 +34,7 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/ClearDataSourceFactory.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaClock.h>
@@ -368,7 +368,7 @@
String8 contentType;
if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
- mHttpSource = DataSourceFactory::CreateMediaHTTP(mHTTPService);
+ mHttpSource = ClearDataSourceFactory::CreateMediaHTTP(mHTTPService);
if (mHttpSource == NULL) {
ALOGE("Failed to create http source!");
notifyPreparedAndCleanup(UNKNOWN_ERROR);
@@ -378,7 +378,7 @@
mLock.unlock();
// This might take long time if connection has some issue.
- sp<DataSource> dataSource = DataSourceFactory::CreateFromURI(
+ sp<DataSource> dataSource = ClearDataSourceFactory::CreateFromURI(
mHTTPService, uri, &mUriHeaders, &contentType,
static_cast<HTTPBase *>(mHttpSource.get()));
mLock.lock();
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 060b698..c649573 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -2474,8 +2474,8 @@
if (mDriver != NULL) {
sp<NuPlayer2Driver> driver = mDriver.promote();
if (driver != NULL) {
- notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAYBACK_COMPLETE, 0);
- notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_STARTED_AS_NEXT, 0);
+ notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_END, 0);
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
}
}
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
index e48e388..e215965 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
@@ -372,10 +372,16 @@
timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
mDTVCCPacket->setRange(0, 0);
}
+ if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
+ return false;
+ }
memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
br.skipBits(16);
} else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
+ if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
+ return false;
+ }
memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
br.skipBits(16);
@@ -403,6 +409,9 @@
line21CCBuf = new ABuffer((cc_count - i) * sizeof(CCData));
line21CCBuf->setRange(0, 0);
}
+ if (line21CCBuf->size() + sizeof(cc) > line21CCBuf->capacity()) {
+ return false;
+ }
memcpy(line21CCBuf->data() + line21CCBuf->size(), &cc, sizeof(cc));
line21CCBuf->setRange(0, line21CCBuf->size() + sizeof(CCData));
}
@@ -464,6 +473,9 @@
size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
if (mSelectedTrack == (ssize_t)trackIndex) {
sp<ABuffer> ccPacket = new ABuffer(block_size);
+ if (ccPacket->capacity() == 0) {
+ return false;
+ }
memcpy(ccPacket->data(), br.data(), block_size);
mCCMap.add(timeUs, ccPacket);
}
@@ -527,10 +539,12 @@
ccBuf = new ABuffer(size);
ccBuf->setRange(0, 0);
- for (ssize_t i = 0; i <= index; ++i) {
- sp<ABuffer> buf = mCCMap.valueAt(i);
- memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
- ccBuf->setRange(0, ccBuf->size() + buf->size());
+ if (ccBuf->capacity() > 0) {
+ for (ssize_t i = 0; i <= index; ++i) {
+ sp<ABuffer> buf = mCCMap.valueAt(i);
+ memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
+ ccBuf->setRange(0, ccBuf->size() + buf->size());
+ }
}
}
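Each of the checks added above follows the same shape: verify that the pending append still fits in the ABuffer before the memcpy, and bail out on malformed data. A small helper-style sketch of that pattern (the helper itself is invented; the decoder keeps the checks inline):

#include <string.h>
#include <media/stagefright/foundation/ABuffer.h>

static bool appendBytes(const android::sp<android::ABuffer> &buf,
                        const void *data, size_t len) {
    if (buf == nullptr || buf->size() + len > buf->capacity()) {
        return false;                    // would overflow the buffer: drop the packet
    }
    memcpy(buf->data() + buf->size(), data, len);
    buf->setRange(0, buf->size() + len);
    return true;
}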
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 645138a..931b86e 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -1088,6 +1088,12 @@
static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
}
if (mediaBuf != NULL) {
+ if (mediaBuf->size() > codecBuffer->capacity()) {
+ handleError(ERROR_BUFFER_TOO_SMALL);
+ mDequeuedInputBuffers.push_back(bufferIx);
+ return false;
+ }
+
codecBuffer->setRange(0, mediaBuf->size());
memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 03d17a5..3069f54 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -930,7 +930,12 @@
// the last little bit of audio. In looping mode, we need to restart it.
mAudioSink->start();
}
- // don't send completion event when looping
+
+ sp<AMessage> notify = new AMessage(kWhatNotifyListener, this);
+ notify->setInt64("srcId", srcId);
+ notify->setInt32("messageId", MEDIA2_INFO);
+ notify->setInt32("ext1", MEDIA2_INFO_DATA_SOURCE_REPEAT);
+ notify->post();
return;
}
if (property_get_bool("persist.debug.sf.stats", false)) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 69cd82e..050e4fb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -1069,6 +1069,12 @@
static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
}
if (mediaBuf != NULL) {
+ if (mediaBuf->size() > codecBuffer->capacity()) {
+ handleError(ERROR_BUFFER_TOO_SMALL);
+ mDequeuedInputBuffers.push_back(bufferIx);
+ return false;
+ }
+
codecBuffer->setRange(0, mediaBuf->size());
memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index bde0862..8d876da 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -239,8 +239,14 @@
size_t *encryptedbytes)
{
// size needed to store all the crypto data
- size_t cryptosize = sizeof(CryptoInfo) +
- sizeof(CryptoPlugin::SubSample) * numSubSamples;
+ size_t cryptosize;
+ // sizeof(CryptoInfo) + sizeof(CryptoPlugin::SubSample) * numSubSamples;
+ if (__builtin_mul_overflow(sizeof(CryptoPlugin::SubSample), numSubSamples, &cryptosize) ||
+ __builtin_add_overflow(cryptosize, sizeof(CryptoInfo), &cryptosize)) {
+ ALOGE("crypto size overflow");
+ return NULL;
+ }
+
CryptoInfo *ret = (CryptoInfo*) malloc(cryptosize);
if (ret == NULL) {
ALOGE("couldn't allocate %zu bytes", cryptosize);
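The overflow guard above is the general pattern for any size computation that feeds an allocation: route every multiply and add through the checked builtins and refuse the request if size_t would wrap. A generic sketch (the function name is invented):

#include <stddef.h>
#include <stdlib.h>

static void *allocHeaderPlusArray(size_t headerSize, size_t elemSize, size_t count) {
    size_t total;
    if (__builtin_mul_overflow(elemSize, count, &total) ||
        __builtin_add_overflow(total, headerSize, &total)) {
        return NULL;                     // size_t would wrap: reject the request
    }
    return malloc(total);
}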
diff --git a/media/libnblog/NBLog.cpp b/media/libnblog/NBLog.cpp
index d6fa3e3..bfc797c 100644
--- a/media/libnblog/NBLog.cpp
+++ b/media/libnblog/NBLog.cpp
@@ -64,7 +64,9 @@
// ---------------------------------------------------------------------------
/*static*/
-std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr) {
+std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr)
+{
+ if (ptr == nullptr) return nullptr;
const uint8_t type = EntryIterator(ptr)->type;
switch (type) {
case EVENT_START_FMT:
@@ -78,31 +80,33 @@
}
}
-NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry) {
+NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry)
+{
}
// ---------------------------------------------------------------------------
-NBLog::EntryIterator NBLog::FormatEntry::begin() const {
+NBLog::EntryIterator NBLog::FormatEntry::begin() const
+{
return EntryIterator(mEntry);
}
-const char *NBLog::FormatEntry::formatString() const {
+const char *NBLog::FormatEntry::formatString() const
+{
return (const char*) mEntry + offsetof(entry, data);
}
-size_t NBLog::FormatEntry::formatStringLength() const {
+size_t NBLog::FormatEntry::formatStringLength() const
+{
return mEntry[offsetof(entry, length)];
}
-NBLog::EntryIterator NBLog::FormatEntry::args() const {
+NBLog::EntryIterator NBLog::FormatEntry::args() const
+{
auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
- // skip hash
- ++it;
+ ++it; // skip start fmt
+ ++it; // skip timestamp
+ ++it; // skip hash
// Skip author if present
if (it->type == EVENT_AUTHOR) {
++it;
@@ -110,33 +114,30 @@
return it;
}
-int64_t NBLog::FormatEntry::timestamp() const {
+int64_t NBLog::FormatEntry::timestamp() const
+{
auto it = begin();
- // skip start fmt
- ++it;
+ ++it; // skip start fmt
return it.payload<int64_t>();
}
-NBLog::log_hash_t NBLog::FormatEntry::hash() const {
+NBLog::log_hash_t NBLog::FormatEntry::hash() const
+{
auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
+ ++it; // skip start fmt
+ ++it; // skip timestamp
// unaligned 64-bit read not supported
log_hash_t hash;
memcpy(&hash, it->data, sizeof(hash));
return hash;
}
-int NBLog::FormatEntry::author() const {
+int NBLog::FormatEntry::author() const
+{
auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
- // skip hash
- ++it;
+ ++it; // skip start fmt
+ ++it; // skip timestamp
+ ++it; // skip hash
 // if there is an author entry, return it; otherwise return -1
if (it->type == EVENT_AUTHOR) {
return it.payload<int>();
@@ -145,19 +146,18 @@
}
NBLog::EntryIterator NBLog::FormatEntry::copyWithAuthor(
- std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const
+{
auto it = begin();
- // copy fmt start entry
- it.copyTo(dst);
- // copy timestamp
- (++it).copyTo(dst); // copy hash
- (++it).copyTo(dst);
+ it.copyTo(dst); // copy fmt start entry
+ (++it).copyTo(dst); // copy timestamp
+ (++it).copyTo(dst); // copy hash
// insert author entry
- size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
+ size_t authorEntrySize = Entry::kOverhead + sizeof(author);
uint8_t authorEntry[authorEntrySize];
authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
authorEntry[offsetof(entry, length)] =
- authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
+ authorEntry[authorEntrySize + Entry::kPreviousLengthOffset] =
sizeof(author);
*(int*) (&authorEntry[offsetof(entry, data)]) = author;
dst->write(authorEntry, authorEntrySize);
@@ -170,76 +170,96 @@
return it;
}
-void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
- size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
- dst->write(ptr, length);
+void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const
+{
+ size_t length = mPtr[offsetof(entry, length)] + Entry::kOverhead;
+ dst->write(mPtr, length);
}
-void NBLog::EntryIterator::copyData(uint8_t *dst) const {
- memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
+void NBLog::EntryIterator::copyData(uint8_t *dst) const
+{
+ memcpy((void*) dst, mPtr + offsetof(entry, data), mPtr[offsetof(entry, length)]);
}
-NBLog::EntryIterator::EntryIterator()
- : ptr(nullptr) {}
+NBLog::EntryIterator::EntryIterator() // Dummy initialization.
+ : mPtr(nullptr)
+{
+}
NBLog::EntryIterator::EntryIterator(const uint8_t *entry)
- : ptr(entry) {}
+ : mPtr(entry)
+{
+}
NBLog::EntryIterator::EntryIterator(const NBLog::EntryIterator &other)
- : ptr(other.ptr) {}
-
-const NBLog::entry& NBLog::EntryIterator::operator*() const {
- return *(entry*) ptr;
+ : mPtr(other.mPtr)
+{
}
-const NBLog::entry* NBLog::EntryIterator::operator->() const {
- return (entry*) ptr;
+const NBLog::entry& NBLog::EntryIterator::operator*() const
+{
+ return *(entry*) mPtr;
}
-NBLog::EntryIterator& NBLog::EntryIterator::operator++() {
- ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+const NBLog::entry* NBLog::EntryIterator::operator->() const
+{
+ return (entry*) mPtr;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator++()
+{
+ mPtr += mPtr[offsetof(entry, length)] + Entry::kOverhead;
return *this;
}
-NBLog::EntryIterator& NBLog::EntryIterator::operator--() {
- ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
+NBLog::EntryIterator& NBLog::EntryIterator::operator--()
+{
+ mPtr -= mPtr[Entry::kPreviousLengthOffset] + Entry::kOverhead;
return *this;
}
-NBLog::EntryIterator NBLog::EntryIterator::next() const {
+NBLog::EntryIterator NBLog::EntryIterator::next() const
+{
EntryIterator aux(*this);
return ++aux;
}
-NBLog::EntryIterator NBLog::EntryIterator::prev() const {
+NBLog::EntryIterator NBLog::EntryIterator::prev() const
+{
EntryIterator aux(*this);
return --aux;
}
-int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const {
- return ptr - other.ptr;
+int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const
+{
+ return mPtr - other.mPtr;
}
-bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const {
- return ptr != other.ptr;
+bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const
+{
+ return mPtr != other.mPtr;
}
-bool NBLog::EntryIterator::hasConsistentLength() const {
- return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
- NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
+bool NBLog::EntryIterator::hasConsistentLength() const
+{
+ return mPtr[offsetof(entry, length)] == mPtr[mPtr[offsetof(entry, length)] +
+ Entry::kOverhead + Entry::kPreviousLengthOffset];
}
// ---------------------------------------------------------------------------
-int64_t NBLog::HistogramEntry::timestamp() const {
+int64_t NBLog::HistogramEntry::timestamp() const
+{
return EntryIterator(mEntry).payload<HistTsEntry>().ts;
}
-NBLog::log_hash_t NBLog::HistogramEntry::hash() const {
+NBLog::log_hash_t NBLog::HistogramEntry::hash() const
+{
return EntryIterator(mEntry).payload<HistTsEntry>().hash;
}
-int NBLog::HistogramEntry::author() const {
+int NBLog::HistogramEntry::author() const
+{
EntryIterator it(mEntry);
if (it->length == sizeof(HistTsEntryWithAuthor)) {
return it.payload<HistTsEntryWithAuthor>().author;
@@ -249,7 +269,8 @@
}
NBLog::EntryIterator NBLog::HistogramEntry::copyWithAuthor(
- std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const
+{
// Current histogram entry has {type, length, struct HistTsEntry, length}.
// We now want {type, length, struct HistTsEntryWithAuthor, length}
uint8_t buffer[Entry::kOverhead + sizeof(HistTsEntryWithAuthor)];
@@ -336,9 +357,7 @@
void NBLog::Writer::log(const char *string)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
size_t length = strlen(string);
if (length > Entry::kMaxLength) {
@@ -349,9 +368,7 @@
void NBLog::Writer::logf(const char *fmt, ...)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
va_list ap;
va_start(ap, fmt);
Writer::logvf(fmt, ap); // the Writer:: is needed to avoid virtual dispatch for LockedWriter
@@ -360,9 +377,7 @@
void NBLog::Writer::logvf(const char *fmt, va_list ap)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
char buffer[Entry::kMaxLength + 1 /*NUL*/];
int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
if (length >= (int) sizeof(buffer)) {
@@ -377,9 +392,7 @@
void NBLog::Writer::logTimestamp()
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
int64_t ts = get_monotonic_ns();
if (ts > 0) {
log(EVENT_TIMESTAMP, &ts, sizeof(ts));
@@ -390,41 +403,31 @@
void NBLog::Writer::logTimestamp(const int64_t ts)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
log(EVENT_TIMESTAMP, &ts, sizeof(ts));
}
void NBLog::Writer::logInteger(const int x)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
log(EVENT_INTEGER, &x, sizeof(x));
}
void NBLog::Writer::logFloat(const float x)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
log(EVENT_FLOAT, &x, sizeof(x));
}
void NBLog::Writer::logPID()
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
log(EVENT_PID, mPidTag, mPidTagSize);
}
void NBLog::Writer::logStart(const char *fmt)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
size_t length = strlen(fmt);
if (length > Entry::kMaxLength) {
length = Entry::kMaxLength;
@@ -434,26 +437,20 @@
void NBLog::Writer::logEnd()
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
Entry entry = Entry(EVENT_END_FMT, NULL, 0);
- log(&entry, true);
+ log(entry, true);
}
void NBLog::Writer::logHash(log_hash_t hash)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
log(EVENT_HASH, &hash, sizeof(hash));
}
void NBLog::Writer::logEventHistTs(Event event, log_hash_t hash)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
HistTsEntry data;
data.hash = hash;
data.ts = get_monotonic_ns();
@@ -466,10 +463,7 @@
void NBLog::Writer::logFormat(const char *fmt, log_hash_t hash, ...)
{
- if (!mEnabled) {
- return;
- }
-
+ if (!mEnabled) return;
va_list ap;
va_start(ap, hash);
Writer::logVFormat(fmt, hash, ap);
@@ -478,9 +472,7 @@
void NBLog::Writer::logVFormat(const char *fmt, log_hash_t hash, va_list argp)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
Writer::logStart(fmt);
int i;
double f;
@@ -536,9 +528,7 @@
void NBLog::Writer::log(Event event, const void *data, size_t length)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
if (data == NULL || length > Entry::kMaxLength) {
// TODO Perhaps it makes sense to display truncated data or at least a
// message that the data is too long? The current behavior can create
@@ -550,19 +540,17 @@
return;
}
Entry etr(event, data, length);
- log(&etr, true /*trusted*/);
+ log(etr, true /*trusted*/);
}
-void NBLog::Writer::log(const NBLog::Entry *etr, bool trusted)
+void NBLog::Writer::log(const NBLog::Entry &etr, bool trusted)
{
- if (!mEnabled) {
- return;
- }
+ if (!mEnabled) return;
if (!trusted) {
- log(etr->mEvent, etr->mData, etr->mLength);
+ log(etr.mEvent, etr.mData, etr.mLength);
return;
}
- size_t need = etr->mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
+ size_t need = etr.mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
// need = number of bytes written to FIFO
// FIXME optimize this using memcpy for the data part of the Entry.
@@ -571,7 +559,7 @@
uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
// write this data to temp array
for (size_t i = 0; i < need; i++) {
- temp[i] = etr->copyEntryDataAt(i);
+ temp[i] = etr.copyEntryDataAt(i);
}
// write to circular buffer
mFifoWriter->write(temp, need);
@@ -743,15 +731,14 @@
if (mFifoReader == NULL) {
return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
}
- // make a copy to avoid race condition with writer
- size_t capacity = mFifo->capacity();
// This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing the
// reader index. The index is incremented after handling corruption, to after the last complete
// entry of the buffer
size_t lost;
audio_utils_iovec iovec[2];
- ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
+ const ssize_t availToRead = mFifoReader->obtain(iovec, mFifo->capacity(),
+ NULL /*timeout*/, &lost);
if (availToRead <= 0) {
return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
}
@@ -800,7 +787,6 @@
snapshot->mLost = lost;
return snapshot;
-
}
// Takes raw content of the local merger FIFO, processes log entries, and
@@ -854,7 +840,7 @@
}
// FIXME: decide whether to print the warnings here or elsewhere
if (!body.isEmpty()) {
- dumpLine(timestamp, body);
+ dumpLine(&timestamp, &body);
}
}
@@ -865,20 +851,22 @@
getAndProcessSnapshot(*snap);
}
-void NBLog::MergeReader::dump(int fd, int indent) {
+void NBLog::MergeReader::dump(int fd, int indent)
+{
// TODO: add a mutex around media.log dump
ReportPerformance::dump(fd, indent, mThreadPerformanceAnalysis);
}
// Writes a string to the console
-void NBLog::Reader::dumpLine(const String8 &timestamp, String8 &body)
+void NBLog::Reader::dumpLine(const String8 *timestamp, String8 *body)
{
+ if (timestamp == nullptr || body == nullptr) return;
if (mFd >= 0) {
- dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
+ dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp->string(), body->string());
} else {
- ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
+ ALOGI("%.*s%s %s", mIndent, "", timestamp->string(), body->string());
}
- body.clear();
+ body->clear();
}
bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
@@ -888,25 +876,33 @@
// ---------------------------------------------------------------------------
-void NBLog::appendTimestamp(String8 *body, const void *data) {
+void NBLog::appendTimestamp(String8 *body, const void *data)
+{
+ if (body == nullptr || data == nullptr) return;
int64_t ts;
memcpy(&ts, data, sizeof(ts));
body->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
(int) ((ts / (1000 * 1000)) % 1000));
}
-void NBLog::appendInt(String8 *body, const void *data) {
+void NBLog::appendInt(String8 *body, const void *data)
+{
+ if (body == nullptr || data == nullptr) return;
int x = *((int*) data);
body->appendFormat("<%d>", x);
}
-void NBLog::appendFloat(String8 *body, const void *data) {
+void NBLog::appendFloat(String8 *body, const void *data)
+{
+ if (body == nullptr || data == nullptr) return;
float f;
- memcpy(&f, data, sizeof(float));
+ memcpy(&f, data, sizeof(f));
body->appendFormat("<%f>", f);
}
-void NBLog::appendPID(String8 *body, const void* data, size_t length) {
+void NBLog::appendPID(String8 *body, const void* data, size_t length)
+{
+ if (body == nullptr || data == nullptr) return;
pid_t id = *((pid_t*) data);
char * name = &((char*) data)[sizeof(pid_t)];
body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
@@ -915,9 +911,9 @@
String8 NBLog::bufferDump(const uint8_t *buffer, size_t size)
{
String8 str;
+ if (buffer == nullptr) return str;
str.append("[ ");
- for(size_t i = 0; i < size; i++)
- {
+ for(size_t i = 0; i < size; i++) {
str.appendFormat("%d ", buffer[i]);
}
str.append("]");
@@ -931,7 +927,8 @@
NBLog::EntryIterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
String8 *timestamp,
- String8 *body) {
+ String8 *body)
+{
// log timestamp
int64_t ts = fmtEntry.timestamp();
timestamp->clear();
@@ -947,7 +944,7 @@
handleAuthor(fmtEntry, body);
// log string
- NBLog::EntryIterator arg = fmtEntry.args();
+ EntryIterator arg = fmtEntry.args();
const char* fmt = fmtEntry.formatString();
size_t fmt_length = fmtEntry.formatStringLength();
@@ -1026,10 +1023,11 @@
new audio_utils_fifo(size, sizeof(uint8_t),
mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
- {}
+{
+}
-void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
-
+void NBLog::Merger::addReader(const NBLog::NamedReader &reader)
+{
// FIXME This is called by binder thread in MediaLogService::registerWriter
// but the access to shared variable mNamedReaders is not yet protected by a lock.
mNamedReaders.push_back(reader);
@@ -1044,25 +1042,23 @@
MergeItem(int64_t ts, int index): ts(ts), index(index) {}
};
-// operators needed for priority queue in merge
-// bool operator>(const int64_t &t1, const int64_t &t2) {
-// return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
-// }
-
-bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
+bool operator>(const struct MergeItem &i1, const struct MergeItem &i2)
+{
return i1.ts > i2.ts || (i1.ts == i2.ts && i1.index > i2.index);
}
// Merge registered readers, sorted by timestamp, and write data to a single FIFO in local memory
-void NBLog::Merger::merge() {
+void NBLog::Merger::merge()
+{
// FIXME This is called by merge thread
// but the access to shared variable mNamedReaders is not yet protected by a lock.
- int nLogs = mNamedReaders.size();
+ const int nLogs = mNamedReaders.size();
std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
- std::vector<NBLog::EntryIterator> offsets(nLogs);
+ std::vector<EntryIterator> offsets;
+ offsets.reserve(nLogs);
for (int i = 0; i < nLogs; ++i) {
snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
- offsets[i] = snapshots[i]->begin();
+ offsets.push_back(snapshots[i]->begin());
}
// initialize offsets
// TODO a custom heap implementation could allow updating the top, improving performance
@@ -1071,17 +1067,19 @@
for (int i = 0; i < nLogs; ++i)
{
if (offsets[i] != snapshots[i]->end()) {
- int64_t ts = AbstractEntry::buildEntry(offsets[i])->timestamp();
- timestamps.emplace(ts, i);
+ std::unique_ptr<AbstractEntry> abstractEntry = AbstractEntry::buildEntry(offsets[i]);
+ if (abstractEntry == nullptr) {
+ continue;
+ }
+ timestamps.emplace(abstractEntry->timestamp(), i);
}
}
while (!timestamps.empty()) {
- // find minimum timestamp
- int index = timestamps.top().index;
+ int index = timestamps.top().index; // find minimum timestamp
// copy it to the log, increasing offset
- offsets[index] = AbstractEntry::buildEntry(offsets[index])->copyWithAuthor(mFifoWriter,
- index);
+ offsets[index] = AbstractEntry::buildEntry(offsets[index])->
+ copyWithAuthor(mFifoWriter, index);
// update data structures
timestamps.pop();
if (offsets[index] != snapshots[index]->end()) {
@@ -1091,7 +1089,8 @@
}
}
-const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const {
+const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const
+{
// FIXME This is returning a reference to a shared variable that needs a lock
return mNamedReaders;
}
@@ -1099,10 +1098,16 @@
// ---------------------------------------------------------------------------
NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
- : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
+ : Reader(shared, size), mNamedReaders(merger.getNamedReaders())
+{
+}
-void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body) {
+void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body)
+{
int author = entry.author();
+ if (author == -1) {
+ return;
+ }
// FIXME Needs a lock
const char* name = mNamedReaders[author].name();
body->appendFormat("%s: ", name);
@@ -1113,16 +1118,20 @@
NBLog::MergeThread::MergeThread(NBLog::Merger &merger, NBLog::MergeReader &mergeReader)
: mMerger(merger),
mMergeReader(mergeReader),
- mTimeoutUs(0) {}
+ mTimeoutUs(0)
+{
+}
-NBLog::MergeThread::~MergeThread() {
+NBLog::MergeThread::~MergeThread()
+{
// set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
requestExit();
setTimeoutUs(0);
join();
}
-bool NBLog::MergeThread::threadLoop() {
+bool NBLog::MergeThread::threadLoop()
+{
bool doMerge;
{
AutoMutex _l(mMutex);
@@ -1144,11 +1153,13 @@
return true;
}
-void NBLog::MergeThread::wakeup() {
+void NBLog::MergeThread::wakeup()
+{
setTimeoutUs(kThreadWakeupPeriodUs);
}
-void NBLog::MergeThread::setTimeoutUs(int time) {
+void NBLog::MergeThread::setTimeoutUs(int time)
+{
AutoMutex _l(mMutex);
mTimeoutUs = time;
mCond.signal();
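
The merge() change above keeps the existing k-way merge: each reader contributes its oldest entry to a min-heap keyed by timestamp (ties broken by reader index through the MergeItem operator>), and the smallest element is repeatedly copied out and replaced by that reader's next entry. A minimal, self-contained sketch of the same pattern, using hypothetical types rather than the NBLog API:

#include <cstdint>
#include <queue>
#include <vector>

struct MergeItem {
    int64_t ts;   // entry timestamp
    int index;    // which reader the entry came from
    MergeItem(int64_t ts, int index) : ts(ts), index(index) {}
};

// std::priority_queue with std::greater<> yields the smallest timestamp first.
bool operator>(const MergeItem &a, const MergeItem &b) {
    return a.ts > b.ts || (a.ts == b.ts && a.index > b.index);
}

// Merge per-reader timestamp streams into one chronologically ordered stream.
std::vector<int64_t> mergeStreams(const std::vector<std::vector<int64_t>> &streams) {
    std::vector<size_t> pos(streams.size(), 0);
    std::priority_queue<MergeItem, std::vector<MergeItem>, std::greater<MergeItem>> heap;
    for (int i = 0; i < (int)streams.size(); ++i) {
        if (!streams[i].empty()) heap.emplace(streams[i][0], i);
    }
    std::vector<int64_t> merged;
    while (!heap.empty()) {
        const int i = heap.top().index;            // reader holding the oldest entry
        heap.pop();
        merged.push_back(streams[i][pos[i]++]);    // copy it out, advance that reader
        if (pos[i] < streams[i].size()) heap.emplace(streams[i][pos[i]], i);
    }
    return merged;
}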
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index f09e93d..3418dc0 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -32,6 +32,7 @@
#include <sys/prctl.h>
#include <time.h>
#include <new>
+#include <audio_utils/LogPlot.h>
#include <audio_utils/roundup.h>
#include <media/nblog/NBLog.h>
#include <media/nblog/PerformanceAnalysis.h>
@@ -208,27 +209,6 @@
return isOutlier;
}
-static int widthOf(int x) {
- int width = 0;
- if (x < 0) {
- width++;
- x = x == INT_MIN ? INT_MAX : -x;
- }
- // assert (x >= 0)
- do {
- ++width;
- x /= 10;
- } while (x > 0);
- return width;
-}
-
-// computes the column width required for a specific histogram value
-inline int numberWidth(double number, int leftPadding) {
- // Added values account for whitespaces needed around numbers, and for the
- // dot and decimal digit not accounted for by widthOf
- return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
-}
-
// rounds value to precision based on log-distance from mean
__attribute__((no_sanitize("signed-integer-overflow")))
inline double logRound(double x, double mean) {
@@ -254,7 +234,7 @@
// of PerformanceAnalysis
void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
int maxHeight) {
- if (mHists.empty()) {
+ if (mHists.empty() || body == nullptr) {
return;
}
@@ -273,69 +253,16 @@
}
}
- // underscores and spaces length corresponds to maximum width of histogram
- static const int kLen = 200;
- std::string underscores(kLen, '_');
- std::string spaces(kLen, ' ');
-
- auto it = buckets.begin();
- double maxDelta = it->first;
- int maxCount = it->second;
- // Compute maximum values
- while (++it != buckets.end()) {
- if (it->first > maxDelta) {
- maxDelta = it->first;
- }
- if (it->second > maxCount) {
- maxCount = it->second;
- }
- }
- int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
- const int leftPadding = widthOf(1 << height);
- const int bucketWidth = numberWidth(maxDelta, leftPadding);
- int scalingFactor = 1;
- // scale data if it exceeds maximum height
- if (height > maxHeight) {
- scalingFactor = (height + maxHeight) / maxHeight;
- height /= scalingFactor;
- }
- body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
- "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
- body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
+ static const int SIZE = 128;
+ char title[SIZE];
+ snprintf(title, sizeof(title), "\n%s %3.2f %s\n%s%d, %lld, %lld\n",
+ "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:",
"Thread, hash, starting timestamp: ", author,
- static_cast<long long int>(hash), static_cast<long long int>(startingTs));
- // write histogram label line with bucket values
- body->appendFormat("\n%s", " ");
- body->appendFormat("%*s", leftPadding, " ");
- for (auto const &x : buckets) {
- const int colWidth = numberWidth(x.first, leftPadding);
- body->appendFormat("%*d", colWidth, x.second);
- }
- // write histogram ascii art
- body->appendFormat("\n%s", " ");
- for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
- const int value = 1 << row;
- body->appendFormat("%.*s", leftPadding, spaces.c_str());
- for (auto const &x : buckets) {
- const int colWidth = numberWidth(x.first, leftPadding);
- body->appendFormat("%.*s%s", colWidth - 1,
- spaces.c_str(), x.second < value ? " " : "|");
- }
- body->appendFormat("\n%s", " ");
- }
- // print x-axis
- const int columns = static_cast<int>(buckets.size());
- body->appendFormat("%*c", leftPadding, ' ');
- body->appendFormat("%.*s", (columns + 1) * bucketWidth, underscores.c_str());
- body->appendFormat("\n%s", " ");
+ static_cast<long long>(hash), static_cast<long long>(startingTs));
+ static const char * const kLabel = "ms";
- // write footer with bucket labels
- body->appendFormat("%*s", leftPadding, " ");
- for (auto const &x : buckets) {
- const int colWidth = numberWidth(x.first, leftPadding);
- body->appendFormat("%*.*f", colWidth, 1, x.first);
- }
- body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+ body->appendFormat("%s",
+ audio_utils_plot_histogram(buckets, title, kLabel, maxHeight).c_str());
// Now report glitches
body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
diff --git a/media/libnblog/include/media/nblog/NBLog.h b/media/libnblog/include/media/nblog/NBLog.h
index fb6f179..bee3ad3 100644
--- a/media/libnblog/include/media/nblog/NBLog.h
+++ b/media/libnblog/include/media/nblog/NBLog.h
@@ -98,7 +98,12 @@
// entry iterator
class EntryIterator {
public:
+ // Used for dummy initialization. Performing any operation on a default-constructed
+ // EntryIterator, other than assigning a valid EntryIterator to it,
+ // is undefined behavior.
EntryIterator();
+ // Caller's responsibility to make sure entry is not nullptr.
+ // Passing in nullptr can result in undefined behavior.
explicit EntryIterator(const uint8_t *entry);
EntryIterator(const EntryIterator &other);
@@ -109,7 +114,9 @@
EntryIterator& operator++(); // ++i
// back to previous entry
EntryIterator& operator--(); // --i
+ // returns an EntryIterator corresponding to the next entry
EntryIterator next() const;
+ // returns an EntryIterator corresponding to the previous entry
EntryIterator prev() const;
bool operator!=(const EntryIterator &other) const;
int operator-(const EntryIterator &other) const;
@@ -120,25 +127,22 @@
template<typename T>
inline const T& payload() {
- return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
+ return *reinterpret_cast<const T *>(mPtr + offsetof(entry, data));
}
inline operator const uint8_t*() const {
- return ptr;
+ return mPtr;
}
private:
- const uint8_t *ptr;
+ const uint8_t *mPtr; // Should not be nullptr except for dummy initialization
};
class AbstractEntry {
public:
-
- // Entry starting in the given pointer
- explicit AbstractEntry(const uint8_t *entry);
virtual ~AbstractEntry() {}
- // build concrete entry of appropriate class from pointer
+ // build concrete entry of appropriate class from ptr.
static std::unique_ptr<AbstractEntry> buildEntry(const uint8_t *ptr);
// get format entry timestamp
@@ -158,6 +162,8 @@
int author) const = 0;
protected:
+ // Entry starting at the given pointer, which shall not be nullptr.
+ explicit AbstractEntry(const uint8_t *entry);
// copies ordinary entry from src to dst, and returns length of entry
// size_t copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
const uint8_t *mEntry;
@@ -360,7 +366,7 @@
// writes a single Entry to the FIFO
void log(Event event, const void *data, size_t length);
// checks the validity of an entry before calling the log() overload above
- void log(const Entry *entry, bool trusted = false);
+ void log(const Entry &entry, bool trusted = false);
Shared* const mShared; // raw pointer to shared memory
sp<IMemory> mIMemory; // ref-counted version, initialized in constructor
@@ -432,7 +438,6 @@
EntryIterator end() { return mEnd; }
private:
- friend class MergeReader;
friend class Reader;
uint8_t *mData;
size_t mLost;
@@ -454,7 +459,7 @@
protected:
// print a summary of the performance to the console
- void dumpLine(const String8& timestamp, String8& body);
+ void dumpLine(const String8 *timestamp, String8 *body);
EntryIterator handleFormat(const FormatEntry &fmtEntry,
String8 *timestamp,
String8 *body);
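
The new comments above codify an invariant rather than change behavior: a default-constructed EntryIterator carries a null mPtr, so the only safe use is to overwrite it with a valid iterator before doing anything else. A simplified illustration with a hypothetical ByteIterator (not the actual NBLog class):

#include <cstdint>

class ByteIterator {
public:
    ByteIterator() : mPtr(nullptr) {}                      // dummy; must be assigned before use
    explicit ByteIterator(const uint8_t *p) : mPtr(p) {}   // caller guarantees p != nullptr
    uint8_t operator*() const { return *mPtr; }            // undefined if mPtr is null
    ByteIterator &operator++() { ++mPtr; return *this; }
private:
    const uint8_t *mPtr;  // null only in the dummy state
};

void example(const uint8_t *buffer) {
    ByteIterator it;              // dummy initialization only
    it = ByteIterator(buffer);    // now valid; dereferencing before this line would be UB
    uint8_t first = *it;
    (void)first;
}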
diff --git a/media/libnblog/include/media/nblog/PerformanceAnalysis.h b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
index ddfe9d6..56e0ea6 100644
--- a/media/libnblog/include/media/nblog/PerformanceAnalysis.h
+++ b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
@@ -25,6 +25,8 @@
namespace android {
+class String8;
+
namespace ReportPerformance {
class PerformanceAnalysis;
diff --git a/media/libnblog/include/media/nblog/ReportPerformance.h b/media/libnblog/include/media/nblog/ReportPerformance.h
index ec0842f..1b11197 100644
--- a/media/libnblog/include/media/nblog/ReportPerformance.h
+++ b/media/libnblog/include/media/nblog/ReportPerformance.h
@@ -23,9 +23,6 @@
namespace android {
-// The String8 class is used by reportPerformance function
-class String8;
-
namespace ReportPerformance {
constexpr int kMsPerSec = 1000;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 7f39d10..3526047 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5556,6 +5556,11 @@
break;
}
+ case kWhatCheckIfStuck: {
+ ALOGV("No-op by default");
+ break;
+ }
+
default:
return false;
}
@@ -7873,6 +7878,18 @@
break;
}
+ case kWhatCheckIfStuck:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mCodec->mStateGeneration) {
+ mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+ }
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
@@ -7884,6 +7901,11 @@
void ACodec::OutputPortSettingsChangedState::stateEntered() {
ALOGV("[%s] Now handling output port settings change",
mCodec->mComponentName.c_str());
+
+ // If we haven't transitioned after 3 seconds, we're probably stuck.
+ sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
}
bool ACodec::OutputPortSettingsChangedState::onOMXFrameRendered(
@@ -8146,6 +8168,11 @@
ALOGV("[%s] Now Flushing", mCodec->mComponentName.c_str());
mFlushComplete[kPortIndexInput] = mFlushComplete[kPortIndexOutput] = false;
+
+ // If we haven't transitioned after 3 seconds, we're probably stuck.
+ sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
}
bool ACodec::FlushingState::onMessageReceived(const sp<AMessage> &msg) {
@@ -8160,6 +8187,7 @@
msg->setInt32("generation", mCodec->mStateGeneration);
msg->post(3000000);
}
+ handled = true;
break;
}
@@ -8180,6 +8208,18 @@
break;
}
+ case kWhatCheckIfStuck:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mCodec->mStateGeneration) {
+ mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+ }
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
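
The kWhatCheckIfStuck messages above follow a common ALooper pattern: post a delayed message stamped with the current state generation, and when it fires, act only if the generation has not moved on, i.e. the codec is still in the same state. A standalone sketch of the idea using plain callbacks instead of AMessage (names are illustrative, not the ACodec API):

#include <cstdint>
#include <functional>

struct StuckWatchdog {
    int32_t stateGeneration = 0;

    // Called on every state transition; any outstanding check becomes stale.
    void onStateEntered() { ++stateGeneration; }

    // Returns the check to run after the timeout (e.g. 3 seconds).
    std::function<void()> armCheck(std::function<void()> signalError) {
        const int32_t generation = stateGeneration;  // snapshot at arming time
        return [this, generation, signalError] {
            if (generation == stateGeneration) {
                // Still in the state that armed the check: assume we are stuck.
                signalError();
            }
            // Otherwise the state changed in time and the check is a no-op.
        };
    }
};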
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 710ae68..266a240 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -129,6 +129,7 @@
secureHandle = static_cast<native_handle_t *>(secureData->getDestinationPointer());
}
ssize_t result = -1;
+ ssize_t codecDataOffset = 0;
if (mCrypto != NULL) {
ICrypto::DestinationBuffer destination;
if (secure) {
@@ -180,9 +181,16 @@
Status status = Status::OK;
hidl_string detailedError;
+ ScramblingControl sctrl = ScramblingControl::UNSCRAMBLED;
+
+ if (key != NULL) {
+ sctrl = (ScramblingControl)key[0];
+ // Adjust for the PES offset
+ codecDataOffset = key[2] | (key[3] << 8);
+ }
auto returnVoid = mDescrambler->descramble(
- key != NULL ? (ScramblingControl)key[0] : ScramblingControl::UNSCRAMBLED,
+ sctrl,
hidlSubSamples,
srcBuffer,
0,
@@ -202,6 +210,11 @@
return UNKNOWN_ERROR;
}
+ if (result < codecDataOffset) {
+ ALOGD("invalid codec data offset: %zd, result %zd", codecDataOffset, result);
+ return BAD_VALUE;
+ }
+
ALOGV("descramble succeeded, %zd bytes", result);
if (dstBuffer.type == BufferType::SHARED_MEMORY) {
@@ -210,7 +223,7 @@
}
}
- it->mCodecBuffer->setRange(0, result);
+ it->mCodecBuffer->setRange(codecDataOffset, result - codecDataOffset);
// Copy metadata from client to codec buffer.
it->mCodecBuffer->meta()->clear();
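
In the descramble path above, when a key blob is present, byte 0 carries the scrambling control and bytes 2-3 carry a little-endian PES offset; the codec buffer range is then advanced past those offset bytes, after rejecting results shorter than the offset. A small sketch of that arithmetic, assuming the same little-endian layout shown in the diff (helper names are hypothetical):

#include <cstddef>
#include <cstdint>
#include <sys/types.h>

// Extract the PES offset encoded little-endian in key[2..3].
static inline size_t pesOffsetFromKey(const uint8_t *key) {
    if (key == nullptr) return 0;
    return static_cast<size_t>(key[2]) | (static_cast<size_t>(key[3]) << 8);
}

// Given the descrambler's output length, compute the range the codec should see.
// Returns false when the output is shorter than the advertised offset.
static bool codecRangeAfterDescramble(ssize_t result, size_t codecDataOffset,
                                      size_t *outOffset, size_t *outLength) {
    if (result < 0 || static_cast<size_t>(result) < codecDataOffset) {
        return false;  // invalid: offset points past the descrambled data
    }
    *outOffset = codecDataOffset;
    *outLength = static_cast<size_t>(result) - codecDataOffset;
    return true;
}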
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 48e351b..27383cb 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -105,6 +105,7 @@
"DataConverter.cpp",
"DataSourceFactory.cpp",
"DataURISource.cpp",
+ "ClearFileSource.cpp",
"FileSource.cpp",
"FrameDecoder.cpp",
"HTTPBase.cpp",
@@ -121,6 +122,7 @@
"MediaCodecSource.cpp",
"MediaExtractorFactory.cpp",
"MediaSync.cpp",
+ "http/ClearMediaHTTP.cpp",
"http/MediaHTTP.cpp",
"MediaMuxer.cpp",
"NuCachedSource2.cpp",
@@ -232,9 +234,9 @@
srcs: [
"CallbackDataSource.cpp",
"CallbackMediaSource.cpp",
- "DataSourceFactory.cpp",
+ "ClearDataSourceFactory.cpp",
+ "ClearFileSource.cpp",
"DataURISource.cpp",
- "FileSource.cpp",
"HTTPBase.cpp",
"HevcUtils.cpp",
"InterfaceUtils.cpp",
@@ -246,13 +248,12 @@
"RemoteMediaSource.cpp",
"Utils.cpp",
"VideoFrameScheduler.cpp",
- "http/MediaHTTP.cpp",
+ "http/ClearMediaHTTP.cpp",
],
shared_libs: [
"libbinder",
"libcutils",
- "libdrmframework",
"libgui",
"liblog",
"libmedia_player2_util",
diff --git a/media/libstagefright/ClearDataSourceFactory.cpp b/media/libstagefright/ClearDataSourceFactory.cpp
new file mode 100644
index 0000000..5d23fda
--- /dev/null
+++ b/media/libstagefright/ClearDataSourceFactory.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearDataSourceFactory"
+
+#include "include/HTTPBase.h"
+#include "include/NuCachedSource2.h"
+
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/stagefright/ClearFileSource.h>
+#include <media/stagefright/ClearMediaHTTP.h>
+#include <media/stagefright/ClearDataSourceFactory.h>
+#include <media/stagefright/DataURISource.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// static
+sp<DataSource> ClearDataSourceFactory::CreateFromURI(
+ const sp<MediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ String8 *contentType,
+ HTTPBase *httpSource) {
+ if (contentType != NULL) {
+ *contentType = "";
+ }
+
+ sp<DataSource> source;
+ if (!strncasecmp("file://", uri, 7)) {
+ source = new ClearFileSource(uri + 7);
+ } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
+ if (httpService == NULL) {
+ ALOGE("Invalid http service!");
+ return NULL;
+ }
+
+ if (httpSource == NULL) {
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
+ return NULL;
+ }
+ httpSource = new ClearMediaHTTP(conn);
+ }
+
+ String8 cacheConfig;
+ bool disconnectAtHighwatermark = false;
+ KeyedVector<String8, String8> nonCacheSpecificHeaders;
+ if (headers != NULL) {
+ nonCacheSpecificHeaders = *headers;
+ NuCachedSource2::RemoveCacheSpecificHeaders(
+ &nonCacheSpecificHeaders,
+ &cacheConfig,
+ &disconnectAtHighwatermark);
+ }
+
+ if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
+ ALOGE("Failed to connect http source!");
+ return NULL;
+ }
+
+ if (contentType != NULL) {
+ *contentType = httpSource->getMIMEType();
+ }
+
+ source = NuCachedSource2::Create(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
+ } else if (!strncasecmp("data:", uri, 5)) {
+ source = DataURISource::Create(uri);
+ } else {
+ // Assume it's a filename.
+ source = new ClearFileSource(uri);
+ }
+
+ if (source == NULL || source->initCheck() != OK) {
+ return NULL;
+ }
+
+ return source;
+}
+
+sp<DataSource> ClearDataSourceFactory::CreateFromFd(int fd, int64_t offset, int64_t length) {
+ sp<ClearFileSource> source = new ClearFileSource(fd, offset, length);
+ return source->initCheck() != OK ? nullptr : source;
+}
+
+sp<DataSource> ClearDataSourceFactory::CreateMediaHTTP(const sp<MediaHTTPService> &httpService) {
+ if (httpService == NULL) {
+ return NULL;
+ }
+
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ return NULL;
+ } else {
+ return new ClearMediaHTTP(conn);
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/ClearFileSource.cpp b/media/libstagefright/ClearFileSource.cpp
new file mode 100644
index 0000000..e3a2cb7
--- /dev/null
+++ b/media/libstagefright/ClearFileSource.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearFileSource"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ClearFileSource.h>
+#include <media/stagefright/Utils.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace android {
+
+ClearFileSource::ClearFileSource(const char *filename)
+ : mFd(-1),
+ mOffset(0),
+ mLength(-1),
+ mName("<null>") {
+
+ if (filename) {
+ mName = String8::format("FileSource(%s)", filename);
+ }
+ ALOGV("%s", filename);
+ mFd = open(filename, O_LARGEFILE | O_RDONLY);
+
+ if (mFd >= 0) {
+ mLength = lseek64(mFd, 0, SEEK_END);
+ } else {
+ ALOGE("Failed to open file '%s'. (%s)", filename, strerror(errno));
+ }
+}
+
+ClearFileSource::ClearFileSource(int fd, int64_t offset, int64_t length)
+ : mFd(fd),
+ mOffset(offset),
+ mLength(length),
+ mName("<null>") {
+ ALOGV("fd=%d (%s), offset=%lld, length=%lld",
+ fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
+
+ if (mOffset < 0) {
+ mOffset = 0;
+ }
+ if (mLength < 0) {
+ mLength = 0;
+ }
+ if (mLength > INT64_MAX - mOffset) {
+ mLength = INT64_MAX - mOffset;
+ }
+ struct stat s;
+ if (fstat(fd, &s) == 0) {
+ if (mOffset > s.st_size) {
+ mOffset = s.st_size;
+ mLength = 0;
+ }
+ if (mOffset + mLength > s.st_size) {
+ mLength = s.st_size - mOffset;
+ }
+ }
+ if (mOffset != offset || mLength != length) {
+ ALOGW("offset/length adjusted from %lld/%lld to %lld/%lld",
+ (long long) offset, (long long) length,
+ (long long) mOffset, (long long) mLength);
+ }
+
+ mName = String8::format(
+ "FileSource(fd(%s), %lld, %lld)",
+ nameForFd(fd).c_str(),
+ (long long) mOffset,
+ (long long) mLength);
+
+}
+
+ClearFileSource::~ClearFileSource() {
+ if (mFd >= 0) {
+ ::close(mFd);
+ mFd = -1;
+ }
+}
+
+status_t ClearFileSource::initCheck() const {
+ return mFd >= 0 ? OK : NO_INIT;
+}
+
+ssize_t ClearFileSource::readAt(off64_t offset, void *data, size_t size) {
+ if (mFd < 0) {
+ return NO_INIT;
+ }
+
+ Mutex::Autolock autoLock(mLock);
+ if (mLength >= 0) {
+ if (offset >= mLength) {
+ return 0; // read beyond EOF.
+ }
+ uint64_t numAvailable = mLength - offset;
+ if ((uint64_t)size > numAvailable) {
+ size = numAvailable;
+ }
+ }
+ return readAt_l(offset, data, size);
+}
+
+ssize_t ClearFileSource::readAt_l(off64_t offset, void *data, size_t size) {
+ off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
+ if (result == -1) {
+ ALOGE("seek to %lld failed", (long long)(offset + mOffset));
+ return UNKNOWN_ERROR;
+ }
+
+ return ::read(mFd, data, size);
+}
+
+status_t ClearFileSource::getSize(off64_t *size) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mFd < 0) {
+ return NO_INIT;
+ }
+
+ *size = mLength;
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index eef5314..aee7fd8 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -22,90 +22,28 @@
#include <media/stagefright/FileSource.h>
#include <media/stagefright/Utils.h>
#include <private/android_filesystem_config.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
namespace android {
FileSource::FileSource(const char *filename)
- : mFd(-1),
- mOffset(0),
- mLength(-1),
- mName("<null>"),
+ : ClearFileSource(filename),
mDecryptHandle(NULL),
mDrmManagerClient(NULL),
mDrmBufOffset(0),
mDrmBufSize(0),
mDrmBuf(NULL){
-
- if (filename) {
- mName = String8::format("FileSource(%s)", filename);
- }
- ALOGV("%s", filename);
- mFd = open(filename, O_LARGEFILE | O_RDONLY);
-
- if (mFd >= 0) {
- mLength = lseek64(mFd, 0, SEEK_END);
- } else {
- ALOGE("Failed to open file '%s'. (%s)", filename, strerror(errno));
- }
}
FileSource::FileSource(int fd, int64_t offset, int64_t length)
- : mFd(fd),
- mOffset(offset),
- mLength(length),
- mName("<null>"),
+ : ClearFileSource(fd, offset, length),
mDecryptHandle(NULL),
mDrmManagerClient(NULL),
mDrmBufOffset(0),
mDrmBufSize(0),
mDrmBuf(NULL) {
- ALOGV("fd=%d (%s), offset=%lld, length=%lld",
- fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
-
- if (mOffset < 0) {
- mOffset = 0;
- }
- if (mLength < 0) {
- mLength = 0;
- }
- if (mLength > INT64_MAX - mOffset) {
- mLength = INT64_MAX - mOffset;
- }
- struct stat s;
- if (fstat(fd, &s) == 0) {
- if (mOffset > s.st_size) {
- mOffset = s.st_size;
- mLength = 0;
- }
- if (mOffset + mLength > s.st_size) {
- mLength = s.st_size - mOffset;
- }
- }
- if (mOffset != offset || mLength != length) {
- ALOGW("offset/length adjusted from %lld/%lld to %lld/%lld",
- (long long) offset, (long long) length,
- (long long) mOffset, (long long) mLength);
- }
-
- mName = String8::format(
- "FileSource(fd(%s), %lld, %lld)",
- nameForFd(fd).c_str(),
- (long long) mOffset,
- (long long) mLength);
-
}
FileSource::~FileSource() {
- if (mFd >= 0) {
- ::close(mFd);
- mFd = -1;
- }
-
if (mDrmBuf != NULL) {
delete[] mDrmBuf;
mDrmBuf = NULL;
@@ -124,10 +62,6 @@
}
}
-status_t FileSource::initCheck() const {
- return mFd >= 0 ? OK : NO_INIT;
-}
-
ssize_t FileSource::readAt(off64_t offset, void *data, size_t size) {
if (mFd < 0) {
return NO_INIT;
@@ -147,30 +81,12 @@
if (mDecryptHandle != NULL && DecryptApiType::CONTAINER_BASED
== mDecryptHandle->decryptApiType) {
- return readAtDRM(offset, data, size);
+ return readAtDRM_l(offset, data, size);
} else {
- off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
- if (result == -1) {
- ALOGE("seek to %lld failed", (long long)(offset + mOffset));
- return UNKNOWN_ERROR;
- }
-
- return ::read(mFd, data, size);
+ return readAt_l(offset, data, size);
}
}
-status_t FileSource::getSize(off64_t *size) {
- Mutex::Autolock autoLock(mLock);
-
- if (mFd < 0) {
- return NO_INIT;
- }
-
- *size = mLength;
-
- return OK;
-}
-
sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) {
if (getuid() == AID_MEDIA_EX) return nullptr; // no DRM in media extractor
if (mDrmManagerClient == NULL) {
@@ -194,7 +110,7 @@
return mDecryptHandle;
}
-ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
+ssize_t FileSource::readAtDRM_l(off64_t offset, void *data, size_t size) {
size_t DRM_CACHE_SIZE = 1024;
if (mDrmBuf == NULL) {
mDrmBuf = new unsigned char[DRM_CACHE_SIZE];
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 6e94517..3370df1 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -501,9 +501,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
@@ -530,11 +531,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
frame->getFlattenedData(),
- frame->mWidth,
- frame->mHeight,
+ frame->mWidth, frame->mHeight, frame->mRowBytes,
crop_left, crop_top, crop_right, crop_bottom);
return OK;
}
@@ -681,9 +681,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
@@ -727,11 +728,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
mFrame->getFlattenedData(),
- mFrame->mWidth,
- mFrame->mHeight,
+ mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
dstLeft, dstTop, dstRight, dstBottom);
return OK;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 353e407..f91c543 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2674,7 +2674,7 @@
CHECK(msg->senderAwaitsResponse(&replyID));
if (mFlags & kFlagIsAsync) {
- ALOGE("dequeueOutputBuffer can't be used in async mode");
+ ALOGE("dequeueInputBuffer can't be used in async mode");
PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index f6c61a0..2d4bd39 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -123,7 +123,7 @@
};
Mutex MediaExtractorFactory::gPluginMutex;
-std::shared_ptr<List<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
+std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
bool MediaExtractorFactory::gPluginsRegistered = false;
// static
@@ -133,7 +133,7 @@
*confidence = 0.0f;
*meta = nullptr;
- std::shared_ptr<List<sp<ExtractorPlugin>>> plugins;
+ std::shared_ptr<std::list<sp<ExtractorPlugin>>> plugins;
{
Mutex::Autolock autoLock(gPluginMutex);
if (!gPluginsRegistered) {
@@ -145,6 +145,7 @@
MediaExtractor::CreatorFunc curCreator = NULL;
MediaExtractor::CreatorFunc bestCreator = NULL;
for (auto it = plugins->begin(); it != plugins->end(); ++it) {
+ ALOGV("sniffing %s", (*it)->def.extractor_name);
float newConfidence;
void *newMeta = nullptr;
MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
@@ -171,7 +172,7 @@
// static
void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
- List<sp<ExtractorPlugin>> &pluginList) {
+ std::list<sp<ExtractorPlugin>> &pluginList) {
// sanity check struct version, uuid, name
if (plugin->def.def_version == 0
|| plugin->def.def_version > MediaExtractor::EXTRACTORDEF_VERSION) {
@@ -213,7 +214,7 @@
//static
void MediaExtractorFactory::RegisterExtractorsInApk(
- const char *apkPath, List<sp<ExtractorPlugin>> &pluginList) {
+ const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", apkPath);
ZipArchiveHandle zipHandle;
int32_t ret = OpenArchive(apkPath, &zipHandle);
@@ -261,7 +262,7 @@
//static
void MediaExtractorFactory::RegisterExtractorsInSystem(
- const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList) {
+ const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", libDirPath);
DIR *libDir = opendir(libDirPath);
if (libDir) {
@@ -291,6 +292,10 @@
}
}
+static bool compareFunc(const sp<ExtractorPlugin>& first, const sp<ExtractorPlugin>& second) {
+ return strcmp(first->def.extractor_name, second->def.extractor_name) < 0;
+}
+
// static
void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
Mutex::Autolock autoLock(gPluginMutex);
@@ -301,7 +306,7 @@
return;
}
- std::shared_ptr<List<sp<ExtractorPlugin>>> newList(new List<sp<ExtractorPlugin>>());
+ std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
RegisterExtractorsInSystem("/system/lib"
#ifdef __LP64__
@@ -319,6 +324,7 @@
RegisterExtractorsInApk(newUpdateApkPath, *newList);
}
+ newList->sort(compareFunc);
gPlugins = newList;
gPluginsRegistered = true;
}
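
With the container switched from android::List to std::list, the new compareFunc sorts plugins by extractor name, presumably so the sniffing order is deterministic. A minimal sketch of the same sort over a plain std::list with a simplified element type:

#include <cstring>
#include <list>
#include <string>

struct Plugin {
    std::string extractor_name;
};

static bool compareByName(const Plugin &first, const Plugin &second) {
    return std::strcmp(first.extractor_name.c_str(), second.extractor_name.c_str()) < 0;
}

void sortPlugins(std::list<Plugin> &plugins) {
    // std::list::sort is stable and takes the strict-weak-ordering comparator directly.
    plugins.sort(compareByName);
}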
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index 04f6ade..2475e7b 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -16,8 +16,10 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MetaDataUtils"
+#include <utils/Log.h>
#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaDataUtils.h>
@@ -25,6 +27,10 @@
namespace android {
bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+ if (data == nullptr || size == 0) {
+ return false;
+ }
+
int32_t width;
int32_t height;
int32_t sarWidth;
@@ -46,6 +52,44 @@
return true;
}
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+ if (data == nullptr || size < 7) {
+ return false;
+ }
+
+ ABitReader bits(data, size);
+
+ // adts_fixed_header
+
+ if (bits.getBits(12) != 0xfffu) {
+ ALOGE("Wrong atds_fixed_header");
+ return false;
+ }
+
+ bits.skipBits(4); // ID, layer, protection_absent
+
+ unsigned profile = bits.getBits(2);
+ if (profile == 3u) {
+ ALOGE("profile should not be 3");
+ return false;
+ }
+ unsigned sampling_freq_index = bits.getBits(4);
+ bits.getBits(1); // private_bit
+ unsigned channel_configuration = bits.getBits(3);
+ if (channel_configuration == 0u) {
+ ALOGE("channel_config should not be 0");
+ return false;
+ }
+
+ if (!MakeAACCodecSpecificData(
+ meta, profile, sampling_freq_index, channel_configuration)) {
+ return false;
+ }
+
+ meta.setInt32(kKeyIsADTS, true);
+ return true;
+}
+
bool MakeAACCodecSpecificData(
MetaDataBase &meta,
unsigned profile, unsigned sampling_freq_index,
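
The new MakeAACCodecSpecificData overload above reads the 7-byte ADTS fixed header: a 12-bit 0xFFF syncword, 4 bits of ID/layer/protection_absent, a 2-bit profile, a 4-bit sampling_frequency_index, one private bit, and a 3-bit channel_configuration. A self-contained sketch of that field extraction using plain shifts, for illustration only (the framework code uses ABitReader):

#include <cstddef>
#include <cstdint>

struct AdtsFixedHeader {
    unsigned profile;
    unsigned samplingFreqIndex;
    unsigned channelConfig;
};

// Returns true if data starts with a plausible ADTS fixed header.
static bool parseAdtsFixedHeader(const uint8_t *data, size_t size, AdtsFixedHeader *out) {
    if (data == nullptr || size < 7 || out == nullptr) return false;
    if (data[0] != 0xff || (data[1] & 0xf0) != 0xf0) return false;   // 12-bit syncword
    out->profile = (data[2] >> 6) & 0x3;                             // 2 bits
    if (out->profile == 3) return false;                             // reserved value
    out->samplingFreqIndex = (data[2] >> 2) & 0xf;                   // 4 bits
    out->channelConfig = ((data[2] & 0x1) << 2) | (data[3] >> 6);    // 3 bits across two bytes
    if (out->channelConfig == 0) return false;
    return true;
}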
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
index 519e870..dd5903a 100644
--- a/media/libstagefright/StagefrightPluginLoader.cpp
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -46,7 +46,7 @@
}
mCreateInputSurface = (CodecBase::CreateInputSurfaceFunc)dlsym(
mLibHandle, "CreateInputSurface");
- if (mCreateBuilder == nullptr) {
+ if (mCreateInputSurface == nullptr) {
ALOGD("Failed to find symbol: CreateInputSurface (%s)", dlerror());
}
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index cf5e91e..ada37a6 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1577,6 +1577,8 @@
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
{ MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3},
+ { MEDIA_MIMETYPE_AUDIO_EAC3, AUDIO_FORMAT_E_AC3},
+ { MEDIA_MIMETYPE_AUDIO_AC4, AUDIO_FORMAT_AC4},
{ MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC},
{ 0, AUDIO_FORMAT_INVALID }
};
@@ -1867,4 +1869,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index 75ca846..9c0fcfa 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -186,8 +186,8 @@
#ifdef DEC_INTERNAL_MEMORY_OPT
video->vol[idx] = IMEM_vol[idx];
video->memoryUsage += sizeof(Vol);
- oscl_memset(video->vol[idx], 0, sizeof(Vol));
if (video->vol[idx] == NULL) status = PV_FALSE;
+ else oscl_memset(video->vol[idx], 0, sizeof(Vol));
stream = IMEM_BitstreamDecVideo;
#else
video->vol[idx] = (Vol *) oscl_malloc(sizeof(Vol));
@@ -213,6 +213,7 @@
else
{
int32 buffer_size;
+ oscl_memset(stream, 0, sizeof(BitstreamDecVideo));
if ((buffer_size = BitstreamOpen(stream, idx)) < 0)
{
mp4dec_log("InitVideoDecoder(): Can't allocate bitstream buffer.\n");
@@ -339,27 +340,33 @@
#ifdef DEC_INTERNAL_MEMORY_OPT
video->currVop->yChan = IMEM_currVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/
if (video->currVop->yChan == NULL) status = PV_FALSE;
- video->currVop->uChan = video->currVop->yChan + size;
- video->currVop->vChan = video->currVop->uChan + (size >> 2);
+ else {
+ video->currVop->uChan = video->currVop->yChan + size;
+ video->currVop->vChan = video->currVop->uChan + (size >> 2);
+ }
video->prevVop->yChan = IMEM_prevVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/
if (video->prevVop->yChan == NULL) status = PV_FALSE;
- video->prevVop->uChan = video->prevVop->yChan + size;
- video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+ else {
+ video->prevVop->uChan = video->prevVop->yChan + size;
+ video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+ }
#else
if (size > INT32_MAX / 3) {
return PV_FALSE;
}
video->currVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
if (video->currVop->yChan == NULL) status = PV_FALSE;
-
- video->currVop->uChan = video->currVop->yChan + size;
- video->currVop->vChan = video->currVop->uChan + (size >> 2);
+ else {
+ video->currVop->uChan = video->currVop->yChan + size;
+ video->currVop->vChan = video->currVop->uChan + (size >> 2);
+ }
video->prevVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
if (video->prevVop->yChan == NULL) status = PV_FALSE;
-
- video->prevVop->uChan = video->prevVop->yChan + size;
- video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+ else {
+ video->prevVop->uChan = video->prevVop->yChan + size;
+ video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
+ }
#endif
video->memoryUsage += (size * 3);
#endif // MEMORY_POOL
@@ -383,8 +390,10 @@
video->prevEnhcVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
if (video->prevEnhcVop->yChan == NULL) status = PV_FALSE;
- video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;
- video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2);
+ else {
+ video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;
+ video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2);
+ }
video->memoryUsage += (3 * size / 2);
#endif
}
@@ -431,10 +440,12 @@
#else
video->sliceNo = (uint8 *) oscl_malloc(nTotalMB);
if (video->sliceNo == NULL) status = PV_FALSE;
+ else oscl_memset(video->sliceNo, 0, nTotalMB);
video->memoryUsage += nTotalMB;
video->acPredFlag = (uint8 *) oscl_malloc(nTotalMB * sizeof(uint8));
if (video->acPredFlag == NULL) status = PV_FALSE;
+ else oscl_memset(video->acPredFlag, 0, nTotalMB * sizeof(uint8));
video->memoryUsage += (nTotalMB);
if ((size_t)nTotalMB > SIZE_MAX / sizeof(typeDCStore)) {
@@ -442,6 +453,7 @@
}
video->predDC = (typeDCStore *) oscl_malloc(nTotalMB * sizeof(typeDCStore));
if (video->predDC == NULL) status = PV_FALSE;
+ else oscl_memset(video->predDC, 0, nTotalMB * sizeof(typeDCStore));
video->memoryUsage += (nTotalMB * sizeof(typeDCStore));
if (nMBPerRow > INT32_MAX - 1
@@ -450,6 +462,7 @@
}
video->predDCAC_col = (typeDCACStore *) oscl_malloc((nMBPerRow + 1) * sizeof(typeDCACStore));
if (video->predDCAC_col == NULL) status = PV_FALSE;
+ else oscl_memset(video->predDCAC_col, 0, (nMBPerRow + 1) * sizeof(typeDCACStore));
video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore));
/* element zero will be used for storing vertical (col) AC coefficients */
@@ -459,9 +472,11 @@
/* Allocating HeaderInfo structure & Quantizer array */
video->headerInfo.Mode = (uint8 *) oscl_malloc(nTotalMB);
if (video->headerInfo.Mode == NULL) status = PV_FALSE;
+ else oscl_memset(video->headerInfo.Mode, 0, nTotalMB);
video->memoryUsage += nTotalMB;
video->headerInfo.CBP = (uint8 *) oscl_malloc(nTotalMB);
if (video->headerInfo.CBP == NULL) status = PV_FALSE;
+ else oscl_memset (video->headerInfo.CBP, 0, nTotalMB);
video->memoryUsage += nTotalMB;
if ((size_t)nTotalMB > SIZE_MAX / sizeof(int16)) {
@@ -469,6 +484,7 @@
}
video->QPMB = (int16 *) oscl_malloc(nTotalMB * sizeof(int16));
if (video->QPMB == NULL) status = PV_FALSE;
+ else memset(video->QPMB, 0x0, nTotalMB * sizeof(int16));
video->memoryUsage += (nTotalMB * sizeof(int));
/* Allocating macroblock space */
@@ -489,8 +505,10 @@
}
video->motX = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
if (video->motX == NULL) status = PV_FALSE;
+ else memset(video->motX, 0, sizeof(MOT) * 4 * nTotalMB);
video->motY = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
if (video->motY == NULL) status = PV_FALSE;
+ else memset(video->motY, 0, sizeof(MOT) * 4 * nTotalMB);
video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB);
#endif
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 2364684..cd984f0 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -114,7 +114,7 @@
mConfig->crcEnabled = false;
uint32_t memRequirements = pvmp3_decoderMemRequirements();
- mDecoderBuf = malloc(memRequirements);
+ mDecoderBuf = calloc(1, memRequirements);
pvmp3_InitDecoder(mConfig, mDecoderBuf);
mIsFirst = true;
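
The SoftMP3 change swaps malloc for calloc so the decoder state starts zero-filled instead of holding stale heap contents; the pvdec_api changes a few hunks earlier reach the same goal with an explicit memset guarded by a null check. A tiny sketch of the two equivalent approaches, for illustration only:

#include <cstdlib>
#include <cstring>

void *allocZeroedWithCalloc(size_t size) {
    return calloc(1, size);               // zero-initialized, or NULL on failure
}

void *allocZeroedWithMemset(size_t size) {
    void *buf = malloc(size);
    if (buf != NULL) {
        memset(buf, 0, size);             // only touch the buffer if allocation succeeded
    }
    return buf;
}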
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 813004b..942f850 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -431,7 +431,7 @@
}
if (mInputBufferCount == 0) {
- CHECK(mHeader == NULL);
+ delete mHeader;
mHeader = new OpusHeader();
memset(mHeader, 0, sizeof(*mHeader));
if (!ParseOpusHeader(data, size, mHeader)) {
@@ -452,6 +452,9 @@
}
int status = OPUS_INVALID_STATE;
+ if (mDecoder != NULL) {
+ opus_multistream_decoder_destroy(mDecoder);
+ }
mDecoder = opus_multistream_decoder_create(kRate,
mHeader->channels,
mHeader->num_streams,
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 05f4104..c46a40f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -85,9 +85,15 @@
|| mDstFormat == OMX_COLOR_Format32bitBGRA8888;
}
+/*
+ * If stride is non-zero, the client's stride will be used. For planar
+ * or semi-planar YUV formats, the stride must be an even number.
+ * If stride is zero, it will be calculated based on width and bpp
+ * of the format, assuming no padding on the right edge.
+ */
ColorConverter::BitmapParams::BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat)
@@ -101,6 +107,8 @@
mCropBottom(cropBottom) {
switch(mColorFormat) {
case OMX_COLOR_Format16bitRGB565:
+ case OMX_COLOR_FormatYUV420Planar16:
+ case OMX_COLOR_FormatCbYCrY:
mBpp = 2;
mStride = 2 * mWidth;
break;
@@ -112,13 +120,7 @@
mStride = 4 * mWidth;
break;
- case OMX_COLOR_FormatYUV420Planar16:
- mBpp = 2;
- mStride = 2 * mWidth;
- break;
-
case OMX_COLOR_FormatYUV420Planar:
- case OMX_COLOR_FormatCbYCrY:
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
@@ -132,6 +134,10 @@
mStride = mWidth;
break;
}
+ // use client's stride if it's specified.
+ if (stride != 0) {
+ mStride = stride;
+ }
}
size_t ColorConverter::BitmapParams::cropWidth() const {
@@ -144,21 +150,21 @@
status_t ColorConverter::convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom) {
BitmapParams src(
const_cast<void *>(srcBits),
- srcWidth, srcHeight,
+ srcWidth, srcHeight, srcStride,
srcCropLeft, srcCropTop, srcCropRight, srcCropBottom, mSrcFormat);
BitmapParams dst(
dstBits,
- dstWidth, dstHeight,
+ dstWidth, dstHeight, dstStride,
dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
if (!((src.mCropLeft & 1) == 0
@@ -792,15 +798,15 @@
uint8_t *kAdjustedClip = initClip();
- uint16_t *dst_ptr = (uint16_t *)dst.mBits
- + dst.mCropTop * dst.mWidth + dst.mCropLeft;
+ uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
+ dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
const uint8_t *src_y =
- (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
const uint8_t *src_u =
- (const uint8_t *)src_y + src.mWidth * src.mHeight
- + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mHeight * src.mStride +
+ src.mCropTop * src.mStride / 2 + src.mCropLeft;
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -842,13 +848,13 @@
}
}
- src_y += src.mWidth;
+ src_y += src.mStride;
if (y & 1) {
- src_u += src.mWidth;
+ src_u += src.mStride;
}
- dst_ptr += dst.mWidth;
+ dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
}
return OK;
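
The converter changes above stop assuming stride equals width: rows are now addressed with the caller-supplied stride, falling back to width * bpp only when the stride argument is zero. A short sketch of the addressing rule for a single plane (hypothetical helper, not the ColorConverter API):

#include <cstddef>
#include <cstdint>

// Address of pixel (x, y) inside a crop region of a single plane.
// stride is in bytes; pass 0 to derive it from width * bpp (no row padding).
static inline const uint8_t *pixelAt(const uint8_t *base,
                                     size_t width, size_t stride, size_t bpp,
                                     size_t cropLeft, size_t cropTop,
                                     size_t x, size_t y) {
    if (stride == 0) {
        stride = width * bpp;   // same fallback the BitmapParams constructor applies
    }
    return base + (cropTop + y) * stride + (cropLeft + x) * bpp;
}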
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 657a05b..359df3d 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -44,6 +44,7 @@
mNativeWindow(nativeWindow),
mWidth(0),
mHeight(0),
+ mStride(0),
mCropLeft(0),
mCropTop(0),
mCropRight(0),
@@ -67,9 +68,10 @@
int32_t colorFormatNew;
CHECK(format->findInt32("color-format", &colorFormatNew));
- int32_t widthNew, heightNew;
- CHECK(format->findInt32("stride", &widthNew));
+ int32_t widthNew, heightNew, strideNew;
+ CHECK(format->findInt32("width", &widthNew));
CHECK(format->findInt32("slice-height", &heightNew));
+ CHECK(format->findInt32("stride", &strideNew));
int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew;
if (!format->findRect(
@@ -106,6 +108,7 @@
mColorFormat = static_cast<OMX_COLOR_FORMATTYPE>(colorFormatNew);
mWidth = widthNew;
mHeight = heightNew;
+ mStride = strideNew;
mCropLeft = cropLeftNew;
mCropTop = cropTopNew;
mCropRight = cropRightNew;
@@ -276,20 +279,15 @@
if (mConverter) {
mConverter->convert(
data,
- mWidth, mHeight,
+ mWidth, mHeight, mStride,
mCropLeft, mCropTop, mCropRight, mCropBottom,
dst,
- buf->stride, buf->height,
+ buf->stride, buf->height, 0,
0, 0, mCropWidth - 1, mCropHeight - 1);
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
- const uint8_t *src_y = (const uint8_t *)data;
- const uint8_t *src_u =
- (const uint8_t *)data + mWidth * mHeight;
- const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y +=mCropLeft + mCropTop * mWidth;
- src_u +=(mCropLeft + mCropTop * mWidth / 2)/2;
- src_v +=(mCropLeft + mCropTop * mWidth / 2)/2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -305,7 +303,7 @@
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
@@ -313,19 +311,15 @@
memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
memcpy(dst_v, src_v, (mCropWidth + 1) / 2);
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
- const uint16_t *src_y = (const uint16_t *)data;
- const uint16_t *src_u = (const uint16_t *)data + mWidth * mHeight;
- const uint16_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y += mCropLeft + mCropTop * mWidth;
- src_u += (mCropLeft + mCropTop * mWidth / 2) / 2;
- src_v += (mCropLeft + mCropTop * mWidth / 2) / 2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -340,21 +334,21 @@
for (int y = 0; y < mCropHeight; ++y) {
for (int x = 0; x < mCropWidth; ++x) {
- dst_y[x] = (uint8_t)(src_y[x] >> 2);
+ dst_y[x] = (uint8_t)(((uint16_t *)src_y)[x] >> 2);
}
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
for (int x = 0; x < (mCropWidth + 1) / 2; ++x) {
- dst_u[x] = (uint8_t)(src_u[x] >> 2);
- dst_v[x] = (uint8_t)(src_v[x] >> 2);
+ dst_u[x] = (uint8_t)(((uint16_t *)src_u)[x] >> 2);
+ dst_v[x] = (uint8_t)(((uint16_t *)src_v)[x] >> 2);
}
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 1695c75..a32cf08 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 25be89f..b165bcb 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
diff --git a/media/libstagefright/http/ClearMediaHTTP.cpp b/media/libstagefright/http/ClearMediaHTTP.cpp
new file mode 100644
index 0000000..bfbad1e
--- /dev/null
+++ b/media/libstagefright/http/ClearMediaHTTP.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearMediaHTTP"
+#include <utils/Log.h>
+
+#include <media/stagefright/ClearMediaHTTP.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <media/MediaHTTPConnection.h>
+
+namespace android {
+
+ClearMediaHTTP::ClearMediaHTTP(const sp<MediaHTTPConnection> &conn)
+ : mInitCheck((conn != NULL) ? OK : NO_INIT),
+ mHTTPConnection(conn),
+ mCachedSizeValid(false),
+ mCachedSize(0ll) {
+}
+
+ClearMediaHTTP::~ClearMediaHTTP() {
+}
+
+status_t ClearMediaHTTP::connect(
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ off64_t /* offset */) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ KeyedVector<String8, String8> extHeaders;
+ if (headers != NULL) {
+ extHeaders = *headers;
+ }
+
+ if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
+ extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+ }
+
+ mLastURI = uri;
+ // reconnect() calls with uri == old mLastURI.c_str(), which gets zapped
+ // as part of the above assignment. Ensure no accidental later use.
+ uri = NULL;
+
+ bool success = mHTTPConnection->connect(mLastURI.c_str(), &extHeaders);
+
+ mLastHeaders = extHeaders;
+
+ mCachedSizeValid = false;
+
+ if (success) {
+ AString sanitized = uriDebugString(mLastURI);
+ mName = String8::format("ClearMediaHTTP(%s)", sanitized.c_str());
+ }
+
+ return success ? OK : UNKNOWN_ERROR;
+}
+
+void ClearMediaHTTP::disconnect() {
+ mName = String8("ClearMediaHTTP(<disconnected>)");
+ if (mInitCheck != OK) {
+ return;
+ }
+
+ mHTTPConnection->disconnect();
+}
+
+status_t ClearMediaHTTP::initCheck() const {
+ return mInitCheck;
+}
+
+ssize_t ClearMediaHTTP::readAt(off64_t offset, void *data, size_t size) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ int64_t startTimeUs = ALooper::GetNowUs();
+
+ size_t numBytesRead = 0;
+ while (numBytesRead < size) {
+ size_t copy = size - numBytesRead;
+
+ if (copy > 64 * 1024) {
+ // limit the buffer sizes transferred across binder boundaries
+ // to avoid spurious transaction failures.
+ copy = 64 * 1024;
+ }
+
+ ssize_t n = mHTTPConnection->readAt(
+ offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
+
+ if (n < 0) {
+ return n;
+ } else if (n == 0) {
+ break;
+ }
+
+ numBytesRead += n;
+ }
+
+ int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
+
+ addBandwidthMeasurement(numBytesRead, delayUs);
+
+ return numBytesRead;
+}
+
+status_t ClearMediaHTTP::getSize(off64_t *size) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ // Caching the returned size so that it stays valid even after a
+ // disconnect. NuCachedSource2 relies on this.
+
+ if (!mCachedSizeValid) {
+ mCachedSize = mHTTPConnection->getSize();
+ mCachedSizeValid = true;
+ }
+
+ *size = mCachedSize;
+
+ return *size < 0 ? *size : static_cast<status_t>(OK);
+}
+
+uint32_t ClearMediaHTTP::flags() {
+ return kWantsPrefetching | kIsHTTPBasedSource;
+}
+
+status_t ClearMediaHTTP::reconnectAtOffset(off64_t offset) {
+ return connect(mLastURI.c_str(), &mLastHeaders, offset);
+}
+
+
+String8 ClearMediaHTTP::getUri() {
+ if (mInitCheck != OK) {
+ return String8::empty();
+ }
+
+ String8 uri;
+ if (OK == mHTTPConnection->getUri(&uri)) {
+ return uri;
+ }
+ return String8(mLastURI.c_str());
+}
+
+String8 ClearMediaHTTP::getMIMEType() const {
+ if (mInitCheck != OK) {
+ return String8("application/octet-stream");
+ }
+
+ String8 mimeType;
+ status_t err = mHTTPConnection->getMIMEType(&mimeType);
+
+ if (err != OK) {
+ return String8("application/octet-stream");
+ }
+
+ return mimeType;
+}
+
+} // namespace android
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 7c9247e..0fba3dc 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -30,10 +30,7 @@
namespace android {
MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
- : mInitCheck((conn != NULL) ? OK : NO_INIT),
- mHTTPConnection(conn),
- mCachedSizeValid(false),
- mCachedSize(0ll),
+ : ClearMediaHTTP(conn),
mDrmManagerClient(NULL) {
}
@@ -41,117 +38,6 @@
clearDRMState_l();
}
-status_t MediaHTTP::connect(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t /* offset */) {
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- KeyedVector<String8, String8> extHeaders;
- if (headers != NULL) {
- extHeaders = *headers;
- }
-
- if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
- extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
- }
-
- mLastURI = uri;
- // reconnect() calls with uri == old mLastURI.c_str(), which gets zapped
- // as part of the above assignment. Ensure no accidental later use.
- uri = NULL;
-
- bool success = mHTTPConnection->connect(mLastURI.c_str(), &extHeaders);
-
- mLastHeaders = extHeaders;
-
- mCachedSizeValid = false;
-
- if (success) {
- AString sanitized = uriDebugString(mLastURI);
- mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
- }
-
- return success ? OK : UNKNOWN_ERROR;
-}
-
-void MediaHTTP::disconnect() {
- mName = String8("MediaHTTP(<disconnected>)");
- if (mInitCheck != OK) {
- return;
- }
-
- mHTTPConnection->disconnect();
-}
-
-status_t MediaHTTP::initCheck() const {
- return mInitCheck;
-}
-
-ssize_t MediaHTTP::readAt(off64_t offset, void *data, size_t size) {
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- int64_t startTimeUs = ALooper::GetNowUs();
-
- size_t numBytesRead = 0;
- while (numBytesRead < size) {
- size_t copy = size - numBytesRead;
-
- if (copy > 64 * 1024) {
- // limit the buffer sizes transferred across binder boundaries
- // to avoid spurious transaction failures.
- copy = 64 * 1024;
- }
-
- ssize_t n = mHTTPConnection->readAt(
- offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
-
- if (n < 0) {
- return n;
- } else if (n == 0) {
- break;
- }
-
- numBytesRead += n;
- }
-
- int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
-
- addBandwidthMeasurement(numBytesRead, delayUs);
-
- return numBytesRead;
-}
-
-status_t MediaHTTP::getSize(off64_t *size) {
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- // Caching the returned size so that it stays valid even after a
- // disconnect. NuCachedSource2 relies on this.
-
- if (!mCachedSizeValid) {
- mCachedSize = mHTTPConnection->getSize();
- mCachedSizeValid = true;
- }
-
- *size = mCachedSize;
-
- return *size < 0 ? *size : static_cast<status_t>(OK);
-}
-
-uint32_t MediaHTTP::flags() {
- return kWantsPrefetching | kIsHTTPBasedSource;
-}
-
-status_t MediaHTTP::reconnectAtOffset(off64_t offset) {
- return connect(mLastURI.c_str(), &mLastHeaders, offset);
-}
-
// DRM...
sp<DecryptHandle> MediaHTTP::DrmInitialization(const char* mime) {
@@ -176,33 +62,6 @@
return mDecryptHandle;
}
-String8 MediaHTTP::getUri() {
- if (mInitCheck != OK) {
- return String8::empty();
- }
-
- String8 uri;
- if (OK == mHTTPConnection->getUri(&uri)) {
- return uri;
- }
- return String8(mLastURI.c_str());
-}
-
-String8 MediaHTTP::getMIMEType() const {
- if (mInitCheck != OK) {
- return String8("application/octet-stream");
- }
-
- String8 mimeType;
- status_t err = mHTTPConnection->getMIMEType(&mimeType);
-
- if (err != OK) {
- return String8("application/octet-stream");
- }
-
- return mimeType;
-}
-
void MediaHTTP::clearDRMState_l() {
if (mDecryptHandle != NULL) {
// To release mDecryptHandle
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 72604e3..59265fe 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -26,8 +26,8 @@
#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaHTTP.h>
-#include <media/stagefright/FileSource.h>
+#include <media/stagefright/ClearMediaHTTP.h>
+#include <media/stagefright/ClearFileSource.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
#include <utils/Mutex.h>
@@ -38,7 +38,7 @@
HTTPDownloader::HTTPDownloader(
const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers) :
- mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
+ mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())),
mExtraHeaders(headers),
mDisconnecting(false) {
}
@@ -91,7 +91,7 @@
if (reconnect) {
if (!strncasecmp(url, "file://", 7)) {
- mDataSource = new FileSource(url + 7);
+ mDataSource = new ClearFileSource(url + 7);
} else if (strncasecmp(url, "http://", 7)
&& strncasecmp(url, "https://", 8)) {
return ERROR_UNSUPPORTED;
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index c286516..64dca4e 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -51,7 +51,7 @@
ColorConverter *mConverter;
YUVMode mYUVMode;
sp<ANativeWindow> mNativeWindow;
- int32_t mWidth, mHeight;
+ int32_t mWidth, mHeight, mStride;
int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 97d15a7..1137cf1 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -137,6 +137,7 @@
kWhatOMXDied = 'OMXd',
kWhatReleaseCodecInstance = 'relC',
kWhatForceStateTransition = 'fstt',
+ kWhatCheckIfStuck = 'Cstk',
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h b/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h
new file mode 100644
index 0000000..12bcdd3
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearDataSourceFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_SOURCE_FACTORY2_H_
+
+#define DATA_SOURCE_FACTORY2_H_
+
+#include <sys/types.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MediaHTTPService;
+class String8;
+struct HTTPBase;
+
+class ClearDataSourceFactory {
+public:
+ static sp<DataSource> CreateFromURI(
+ const sp<MediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers = NULL,
+ String8 *contentType = NULL,
+ HTTPBase *httpSource = NULL);
+
+ static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
+ static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
+};
+
+} // namespace android
+
+#endif // DATA_SOURCE_FACTORY2_H_
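
ClearDataSourceFactory exposes the same factory surface as DataSourceFactory; the "Clear" naming suggests it wires up the ClearFileSource/ClearMediaHTTP sources added below rather than the DRM-aware ones. A minimal caller sketch, assuming a hypothetical helper outside this patch:

    #include <media/MediaHTTPService.h>
    #include <media/stagefright/ClearDataSourceFactory.h>

    // Hypothetical caller: open a clear (non-DRM) source for a remote clip.
    sp<DataSource> openClearSource(const sp<MediaHTTPService> &httpService) {
        // The URL is illustrative only.
        return ClearDataSourceFactory::CreateFromURI(
                httpService, "http://example.com/clip.mp4");
    }
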
diff --git a/media/libstagefright/include/media/stagefright/ClearFileSource.h b/media/libstagefright/include/media/stagefright/ClearFileSource.h
new file mode 100644
index 0000000..be83748
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearFileSource.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_FILE_SOURCE_H_
+
+#define CLEAR_FILE_SOURCE_H_
+
+#include <stdio.h>
+
+#include <media/DataSource.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ClearFileSource : public DataSource {
+public:
+ ClearFileSource(const char *filename);
+ // ClearFileSource takes ownership and will close the fd
+ ClearFileSource(int fd, int64_t offset, int64_t length);
+
+ virtual status_t initCheck() const;
+
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+ virtual status_t getSize(off64_t *size);
+
+ virtual uint32_t flags() {
+ return kIsLocalFileSource;
+ }
+
+ virtual String8 toString() {
+ return mName;
+ }
+
+protected:
+ virtual ~ClearFileSource();
+ virtual ssize_t readAt_l(off64_t offset, void *data, size_t size);
+
+ int mFd;
+ int64_t mOffset;
+ int64_t mLength;
+ Mutex mLock;
+
+private:
+ String8 mName;
+
+ ClearFileSource(const ClearFileSource &);
+ ClearFileSource &operator=(const ClearFileSource &);
+};
+
+} // namespace android
+
+#endif // CLEAR_FILE_SOURCE_H_
+
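
ClearFileSource carries the plain file I/O that FileSource used to own; FileSource, reworked later in this patch, now derives from it and layers only the DRM handling on top. A minimal sketch of using the clear variant directly (the path is illustrative):

    #include <media/stagefright/ClearFileSource.h>

    // Illustrative only: read the first TS packet of a local clip, no DRM involved.
    ssize_t readFirstPacket(uint8_t packet[188]) {
        sp<DataSource> source = new ClearFileSource("/data/local/tmp/clip.ts");
        if (source->initCheck() != OK) {
            return -1;
        }
        return source->readAt(0 /* offset */, packet, 188 /* size */);
    }
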
diff --git a/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h b/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
new file mode 100644
index 0000000..7fe9c74
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_MEDIA_HTTP_H_
+
+#define CLEAR_MEDIA_HTTP_H_
+
+#include <media/stagefright/foundation/AString.h>
+
+#include "include/HTTPBase.h"
+
+namespace android {
+
+struct MediaHTTPConnection;
+
+struct ClearMediaHTTP : public HTTPBase {
+ ClearMediaHTTP(const sp<MediaHTTPConnection> &conn);
+
+ virtual status_t connect(
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ off64_t offset);
+
+ virtual void disconnect();
+
+ virtual status_t initCheck() const;
+
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+ virtual status_t getSize(off64_t *size);
+
+ virtual uint32_t flags();
+
+ virtual status_t reconnectAtOffset(off64_t offset);
+
+protected:
+ virtual ~ClearMediaHTTP();
+
+ virtual String8 getUri();
+ virtual String8 getMIMEType() const;
+
+ AString mLastURI;
+
+private:
+ status_t mInitCheck;
+ sp<MediaHTTPConnection> mHTTPConnection;
+
+ KeyedVector<String8, String8> mLastHeaders;
+
+ bool mCachedSizeValid;
+ off64_t mCachedSize;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ClearMediaHTTP);
+};
+
+} // namespace android
+
+#endif // CLEAR_MEDIA_HTTP_H_
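
ClearMediaHTTP keeps the HTTPBase behavior that MediaHTTP previously implemented, so callers such as HTTPDownloader and SDPLoader in this change only need to swap the constructor. A minimal connection sketch, assuming a hypothetical caller:

    #include <media/MediaHTTPService.h>
    #include <media/stagefright/ClearMediaHTTP.h>

    // Hypothetical caller: connect a clear HTTP source starting at offset 0.
    sp<DataSource> openClearHttp(const sp<MediaHTTPService> &service, const char *url) {
        sp<ClearMediaHTTP> source = new ClearMediaHTTP(service->makeHTTPConnection());
        if (source->connect(url, NULL /* headers */, 0 /* offset */) != OK) {
            return NULL;
        }
        return source;
    }
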
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 5b3543d..2b8c7c8 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -37,11 +37,11 @@
status_t convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom);
@@ -49,7 +49,7 @@
struct BitmapParams {
BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat);
diff --git a/media/libstagefright/include/media/stagefright/FileSource.h b/media/libstagefright/include/media/stagefright/FileSource.h
index 8604890..b610eef 100644
--- a/media/libstagefright/include/media/stagefright/FileSource.h
+++ b/media/libstagefright/include/media/stagefright/FileSource.h
@@ -20,47 +20,29 @@
#include <stdio.h>
-#include <media/DataSource.h>
+#include <media/stagefright/ClearFileSource.h>
#include <media/stagefright/MediaErrors.h>
#include <utils/threads.h>
#include <drm/DrmManagerClient.h>
namespace android {
-class FileSource : public DataSource {
+class FileSource : public ClearFileSource {
public:
FileSource(const char *filename);
// FileSource takes ownership and will close the fd
FileSource(int fd, int64_t offset, int64_t length);
- virtual status_t initCheck() const;
-
virtual ssize_t readAt(off64_t offset, void *data, size_t size);
- virtual status_t getSize(off64_t *size);
-
- virtual uint32_t flags() {
- return kIsLocalFileSource;
- }
-
virtual sp<DecryptHandle> DrmInitialization(const char *mime);
- virtual String8 toString() {
- return mName;
- }
-
static bool requiresDrm(int fd, int64_t offset, int64_t length, const char *mime);
protected:
virtual ~FileSource();
private:
- int mFd;
- int64_t mOffset;
- int64_t mLength;
- Mutex mLock;
- String8 mName;
-
/*for DRM*/
sp<DecryptHandle> mDecryptHandle;
DrmManagerClient *mDrmManagerClient;
@@ -68,7 +50,7 @@
ssize_t mDrmBufSize;
unsigned char *mDrmBuf;
- ssize_t readAtDRM(off64_t offset, void *data, size_t size);
+ ssize_t readAtDRM_l(off64_t offset, void *data, size_t size);
FileSource(const FileSource &);
FileSource &operator=(const FileSource &);
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index fb9f5bd..d5f4b35 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -22,7 +22,6 @@
#include <media/IMediaExtractor.h>
#include <media/MediaExtractor.h>
-#include <utils/List.h>
namespace android {
@@ -40,15 +39,15 @@
private:
static Mutex gPluginMutex;
- static std::shared_ptr<List<sp<ExtractorPlugin>>> gPlugins;
+ static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
static bool gPluginsRegistered;
static void RegisterExtractorsInApk(
- const char *apkPath, List<sp<ExtractorPlugin>> &pluginList);
+ const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractorsInSystem(
- const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList);
+ const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractor(
- const sp<ExtractorPlugin> &plugin, List<sp<ExtractorPlugin>> &pluginList);
+ const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
static MediaExtractor::CreatorFunc sniff(DataSourceBase *source,
float *confidence, void **meta, MediaExtractor::FreeMetaFunc *freeMeta,
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libstagefright/include/media/stagefright/MediaHTTP.h
index fe0e613..acaa6c4 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libstagefright/include/media/stagefright/MediaHTTP.h
@@ -19,50 +19,21 @@
#define MEDIA_HTTP_H_
#include <media/stagefright/foundation/AString.h>
-
-#include "include/HTTPBase.h"
+#include <media/stagefright/ClearMediaHTTP.h>
namespace android {
struct MediaHTTPConnection;
-struct MediaHTTP : public HTTPBase {
+struct MediaHTTP : public ClearMediaHTTP {
MediaHTTP(const sp<MediaHTTPConnection> &conn);
- virtual status_t connect(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset);
-
- virtual void disconnect();
-
- virtual status_t initCheck() const;
-
- virtual ssize_t readAt(off64_t offset, void *data, size_t size);
-
- virtual status_t getSize(off64_t *size);
-
- virtual uint32_t flags();
-
- virtual status_t reconnectAtOffset(off64_t offset);
-
protected:
virtual ~MediaHTTP();
virtual sp<DecryptHandle> DrmInitialization(const char* mime);
- virtual String8 getUri();
- virtual String8 getMIMEType() const;
private:
- status_t mInitCheck;
- sp<MediaHTTPConnection> mHTTPConnection;
-
- KeyedVector<String8, String8> mLastHeaders;
- AString mLastURI;
-
- bool mCachedSizeValid;
- off64_t mCachedSize;
-
sp<DecryptHandle> mDecryptHandle;
DrmManagerClient *mDrmManagerClient;
diff --git a/media/libstagefright/include/media/stagefright/MetaDataUtils.h b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
index d5a8080..4a7107d 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataUtils.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
@@ -24,6 +24,7 @@
struct ABuffer;
bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
bool MakeAACCodecSpecificData(MetaDataBase &meta, unsigned profile, unsigned sampling_freq_index,
unsigned channel_configuration);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5cc5093..fb498d4 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -119,6 +119,7 @@
private:
struct StreamInfo {
unsigned mType;
+ unsigned mTypeExt;
unsigned mPID;
int32_t mCASystemId;
};
@@ -145,10 +146,12 @@
Stream(Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID);
unsigned type() const { return mStreamType; }
+ unsigned typeExt() const { return mStreamTypeExt; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
@@ -194,6 +197,7 @@
Program *mProgram;
unsigned mElementaryPID;
unsigned mStreamType;
+ unsigned mStreamTypeExt;
unsigned mPCR_PID;
int32_t mExpectedContinuityCounter;
@@ -447,7 +451,7 @@
if (descriptor_length > infoLength) {
break;
}
- if (descriptor_tag == 9 && descriptor_length >= 4) {
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
found = true;
caDescriptor->mSystemID = br->getBits(16);
caDescriptor->mPID = br->getBits(16) & 0x1fff;
@@ -513,37 +517,65 @@
// infoBytesRemaining is the number of bytes that make up the
// variable length section of ES_infos. It does not include the
// final CRC.
- size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
+ int32_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
while (infoBytesRemaining >= 5) {
-
- unsigned streamType = br->getBits(8);
- ALOGV(" stream_type = 0x%02x", streamType);
-
+ StreamInfo info;
+ info.mType = br->getBits(8);
+ ALOGV(" stream_type = 0x%02x", info.mType);
MY_LOGV(" reserved = %u", br->getBits(3));
- unsigned elementaryPID = br->getBits(13);
- ALOGV(" elementary_PID = 0x%04x", elementaryPID);
+ info.mPID = br->getBits(13);
+ ALOGV(" elementary_PID = 0x%04x", info.mPID);
MY_LOGV(" reserved = %u", br->getBits(4));
unsigned ES_info_length = br->getBits(12);
ALOGV(" ES_info_length = %u", ES_info_length);
+ infoBytesRemaining -= 5 + ES_info_length;
CADescriptor streamCA;
- bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_RESERVED_MAX;
+ bool hasStreamCA = false;
+ while (ES_info_length > 2 && infoBytesRemaining >= 0) {
+ unsigned descriptor_tag = br->getBits(8);
+ ALOGV(" tag = 0x%02x", descriptor_tag);
+
+ unsigned descriptor_length = br->getBits(8);
+ ALOGV(" len = %u", descriptor_length);
+
+ ES_info_length -= 2;
+ if (descriptor_length > ES_info_length) {
+ return ERROR_MALFORMED;
+ }
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+ hasStreamCA = true;
+ streamCA.mSystemID = br->getBits(16);
+ streamCA.mPID = br->getBits(16) & 0x1fff;
+ ES_info_length -= 4;
+ streamCA.mPrivateData.assign(br->data(), br->data() + descriptor_length - 4);
+ } else if (info.mType == STREAMTYPE_PES_PRIVATE_DATA &&
+ descriptor_tag == DESCRIPTOR_DVB_EXTENSION && descriptor_length >= 1) {
+ unsigned descTagExt = br->getBits(8);
+ ALOGV(" tag_ext = 0x%02x", descTagExt);
+ if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
+ }
+ ES_info_length -= descriptor_length;
+ descriptor_length--;
+ br->skipBits(descriptor_length * 8);
+ } else {
+ ES_info_length -= descriptor_length;
+ br->skipBits(descriptor_length * 8);
+ }
+ }
if (hasStreamCA && !mParser->mCasManager->addStream(
- mProgramNumber, elementaryPID, streamCA)) {
+ mProgramNumber, info.mPID, streamCA)) {
return ERROR_MALFORMED;
}
- StreamInfo info;
- info.mType = streamType;
- info.mPID = elementaryPID;
info.mCASystemId = hasProgramCA ? programCA.mSystemID :
hasStreamCA ? streamCA.mSystemID : -1;
infos.push(info);
-
- infoBytesRemaining -= 5 + ES_info_length;
}
if (infoBytesRemaining != 0) {
@@ -602,7 +634,7 @@
if (index < 0) {
sp<Stream> stream = new Stream(
- this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+ this, info.mPID, info.mType, info.mTypeExt, PCR_PID, info.mCASystemId);
if (mSampleAesKeyItem != NULL) {
stream->signalNewSampleAesKey(mSampleAesKeyItem);
@@ -720,11 +752,13 @@
Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID)
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
+ mStreamTypeExt(streamTypeExt),
mPCR_PID(PCR_PID),
mExpectedContinuityCounter(-1),
mPayloadStarted(false),
@@ -741,10 +775,12 @@
ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d, SampleEncrypted: %d",
elementaryPID, streamType, mScrambled, mSampleEncrypted);
- uint32_t flags =
- (isVideo() && mScrambled) ? ElementaryStreamQueue::kFlag_ScrambledData :
- (mSampleEncrypted) ? ElementaryStreamQueue::kFlag_SampleEncryptedData :
- 0;
+ uint32_t flags = 0;
+ if (((isVideo() || isAudio()) && mScrambled)) {
+ flags = ElementaryStreamQueue::kFlag_ScrambledData;
+ } else if (mSampleEncrypted) {
+ flags = ElementaryStreamQueue::kFlag_SampleEncryptedData;
+ }
ElementaryStreamQueue::Mode mode = ElementaryStreamQueue::INVALID;
@@ -781,6 +817,16 @@
mode = ElementaryStreamQueue::AC3;
break;
+ case STREAMTYPE_EAC3:
+ mode = ElementaryStreamQueue::EAC3;
+ break;
+
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
+ mode = ElementaryStreamQueue::AC4;
+ }
+ break;
+
case STREAMTYPE_METADATA:
mode = ElementaryStreamQueue::METADATA;
break;
@@ -986,9 +1032,12 @@
case STREAMTYPE_MPEG2_AUDIO_ADTS:
case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
+ case STREAMTYPE_EAC3:
case STREAMTYPE_AAC_ENCRYPTED:
case STREAMTYPE_AC3_ENCRYPTED:
return true;
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
default:
return false;
@@ -1395,7 +1444,7 @@
// Perform the 1st pass descrambling if needed
if (descrambleBytes > 0) {
memcpy(mDescrambledBuffer->data(), mBuffer->data(), descrambleBytes);
- mDescrambledBuffer->setRange(0, descrambleBytes);
+ mDescrambledBuffer->setRange(0, mBuffer->size());
hidl_vec<SubSample> subSamples;
subSamples.resize(descrambleSubSamples);
@@ -1412,10 +1461,9 @@
}
}
- uint64_t srcOffset = 0, dstOffset = 0;
- // If scrambled at PES-level, PES header should be skipped
+ // If scrambled at PES-level, PES header is in the clear
if (pesScramblingControl != 0) {
- srcOffset = dstOffset = pesOffset;
+ subSamples[0].numBytesOfClearData = pesOffset;
subSamples[0].numBytesOfEncryptedData -= pesOffset;
}
@@ -1431,9 +1479,9 @@
(ScramblingControl) sctrl,
subSamples,
mDescramblerSrcBuffer,
- srcOffset,
+ 0 /*srcOffset*/,
dstBuffer,
- dstOffset,
+ 0 /*dstOffset*/,
[&status, &bytesWritten, &detailedError] (
Status _status, uint32_t _bytesWritten,
const hidl_string& _detailedError) {
@@ -1450,9 +1498,21 @@
ALOGV("[stream %d] descramble succeeded, %d bytes",
mElementaryPID, bytesWritten);
- memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+ // Set descrambleBytes to the returned result.
+ // Note that this might be smaller than the total length of input data.
+ // (e.g. when we're descrambling the PES header portion of a secure stream,
+ // the plugin might cut it off right after the PES header.)
+ descrambleBytes = bytesWritten;
}
+ // |buffer| points to the buffer from which we'd parse the PES header.
+ // When the output stream is scrambled, it points to mDescrambledBuffer
+ // (unless all packets in this PES are actually clear, in which case,
+ // it points to mBuffer since we never copied into mDescrambledBuffer).
+ // When the output stream is clear, it points to mBuffer, and we'll
+ // copy all descrambled data back to mBuffer.
+ sp<ABuffer> buffer = mBuffer;
if (mQueue->isScrambled()) {
// Queue subSample info for scrambled queue
sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
@@ -1464,8 +1524,7 @@
for (auto it = mSubSamples.begin();
it != mSubSamples.end(); it++, i++) {
if ((it->transport_scrambling_mode == 0
- && pesScramblingControl == 0)
- || i < descrambleSubSamples) {
+ && pesScramblingControl == 0)) {
clearSizePtr[i] = it->subSampleSize;
encSizePtr[i] = 0;
} else {
@@ -1474,14 +1533,29 @@
}
isSync |= it->random_access_indicator;
}
+
+ // If scrambled at PES-level, PES header is in the clear
+ if (pesScramblingControl != 0) {
+ clearSizePtr[0] = pesOffset;
+ encSizePtr[0] -= pesOffset;
+ }
// Pass the original TS subsample size now. The PES header adjust
// will be applied when the scrambled AU is dequeued.
+ // Note that if descrambleBytes is 0, this PES contains only clear
+ // TS packets, and leadingClearBytes is the entire buffer size.
mQueue->appendScrambledData(
- mBuffer->data(), mBuffer->size(), sctrl,
- isSync, clearSizesBuffer, encSizesBuffer);
+ mBuffer->data(), mBuffer->size(),
+ (descrambleBytes > 0) ? descrambleBytes : mBuffer->size(),
+ sctrl, isSync, clearSizesBuffer, encSizesBuffer);
+
+ if (descrambleBytes > 0) {
+ buffer = mDescrambledBuffer;
+ }
+ } else {
+ memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
}
- ABitReader br(mBuffer->data(), mBuffer->size());
+ ABitReader br(buffer->data(), buffer->size());
status_t err = parsePES(&br, event);
if (err != OK) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 45ca06b..a31dc46 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -142,6 +142,7 @@
STREAMTYPE_MPEG2_VIDEO = 0x02,
STREAMTYPE_MPEG1_AUDIO = 0x03,
STREAMTYPE_MPEG2_AUDIO = 0x04,
+ STREAMTYPE_PES_PRIVATE_DATA = 0x06,
STREAMTYPE_MPEG2_AUDIO_ADTS = 0x0f,
STREAMTYPE_MPEG4_VIDEO = 0x10,
STREAMTYPE_METADATA = 0x15,
@@ -153,6 +154,7 @@
// Stream type 0x83 is non-standard,
// it could be LPCM or TrueHD AC3
STREAMTYPE_LPCM_AC3 = 0x83,
+ STREAMTYPE_EAC3 = 0x87,
//Sample Encrypted types
STREAMTYPE_H264_ENCRYPTED = 0xDB,
@@ -160,6 +162,20 @@
STREAMTYPE_AC3_ENCRYPTED = 0xC1,
};
+ enum {
+ // From ISO/IEC 13818-1: 2007 (E), Table 2-29
+ DESCRIPTOR_CA = 0x09,
+
+ // DVB BlueBook A038 Table 12
+ DESCRIPTOR_DVB_EXTENSION = 0x7F,
+ };
+
+ // DVB BlueBook A038 Table 109
+ enum {
+ EXT_DESCRIPTOR_DVB_AC4 = 0x15,
+ EXT_DESCRIPTOR_DVB_RESERVED_MAX = 0x7F,
+ };
+
protected:
virtual ~ATSParser();
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index ece0692..9e154a3 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -226,6 +226,7 @@
int32_t cryptoMode;
if (buffer->meta()->findInt32("cryptoMode", &cryptoMode)) {
int32_t cryptoKey;
+ int32_t pesOffset;
sp<ABuffer> clearBytesBuffer, encBytesBuffer;
CHECK(buffer->meta()->findInt32("cryptoKey", &cryptoKey));
@@ -233,6 +234,8 @@
&& clearBytesBuffer != NULL);
CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
&& encBytesBuffer != NULL);
+ CHECK(buffer->meta()->findInt32("pesOffset", &pesOffset)
+ && (pesOffset >= 0) && (pesOffset < 65536));
bufmeta.setInt32(kKeyCryptoMode, cryptoMode);
@@ -240,6 +243,11 @@
bufmeta.setData(kKeyCryptoIV, 0, array, 16);
array[0] = (uint8_t) (cryptoKey & 0xff);
+ // array[1] contains PES header flag, which we don't use.
+ // array[2~3] contain the PES offset.
+ array[2] = (uint8_t) (pesOffset & 0xff);
+ array[3] = (uint8_t) ((pesOffset >> 8) & 0xff);
+
bufmeta.setData(kKeyCryptoKey, 0, array, 16);
bufmeta.setData(kKeyPlainSizes, 0,
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 0fa9fcb..fb8b9fd 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -86,6 +86,21 @@
mCasSessionId = sessionId;
}
+static int32_t readVariableBits(ABitReader &bits, int32_t nbits) {
+ int32_t value = 0;
+ int32_t more_bits = 1;
+
+ while (more_bits) {
+ value += bits.getBits(nbits);
+ more_bits = bits.getBits(1);
+ if (!more_bits)
+ break;
+ value++;
+ value <<= nbits;
+ }
+ return value;
+}
+
// Parse AC3 header assuming the current ptr is the start position of a syncframe,
// update metadata only if applicable, and return the payload size
static unsigned parseAC3SyncFrame(
@@ -195,8 +210,153 @@
return payloadSize;
}
-static bool IsSeeminglyValidAC3Header(const uint8_t *ptr, size_t size) {
- return parseAC3SyncFrame(ptr, size, NULL) > 0;
+// Parse EAC3 header assuming the current ptr is the start position of a syncframe,
+// update metadata only if applicable, and return the payload size
+// ATSC A/52:2012 E2.3.1
+static unsigned parseEAC3SyncFrame(
+ const uint8_t *ptr, size_t size, sp<MetaData> *metaData) {
+ static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+ static const unsigned samplingRateTable[] = {48000, 44100, 32000};
+ static const unsigned samplingRateTable2[] = {24000, 22050, 16000};
+
+ ABitReader bits(ptr, size);
+ if (bits.numBitsLeft() < 16) {
+ ALOGE("Not enough bits left for further parsing");
+ return 0;
+ }
+ if (bits.getBits(16) != 0x0B77) {
+ ALOGE("No valid sync word in EAC3 header");
+ return 0;
+ }
+
+ // we parse up to bsid so there needs to be at least that many bits
+ if (bits.numBitsLeft() < 2 + 3 + 11 + 2 + 2 + 3 + 1 + 5) {
+ ALOGE("Not enough bits left for further parsing");
+ return 0;
+ }
+
+ unsigned strmtyp = bits.getBits(2);
+ if (strmtyp == 3) {
+ ALOGE("Incorrect strmtyp in EAC3 header");
+ return 0;
+ }
+
+ unsigned substreamid = bits.getBits(3);
+ // only the first independent stream is supported
+ if ((strmtyp == 0 || strmtyp == 2) && substreamid != 0)
+ return 0;
+
+ unsigned frmsiz = bits.getBits(11);
+ unsigned fscod = bits.getBits(2);
+
+ unsigned samplingRate = 0;
+ if (fscod == 0x3) {
+ unsigned fscod2 = bits.getBits(2);
+ if (fscod2 == 3) {
+ ALOGW("Incorrect fscod2 in EAC3 header");
+ return 0;
+ }
+ samplingRate = samplingRateTable2[fscod2];
+ } else {
+ samplingRate = samplingRateTable[fscod];
+ unsigned numblkscod __unused = bits.getBits(2);
+ }
+
+ unsigned acmod = bits.getBits(3);
+ unsigned lfeon = bits.getBits(1);
+ unsigned bsid = bits.getBits(5);
+ if (bsid < 11 || bsid > 16) {
+ ALOGW("Incorrect bsid in EAC3 header. Could be AC-3 or some unknown EAC3 format");
+ return 0;
+ }
+
+ // we currently only support the first independent stream
+ if (metaData != NULL && (strmtyp == 0 || strmtyp == 2)) {
+ unsigned channelCount = channelCountTable[acmod] + lfeon;
+ ALOGV("EAC3 channelCount = %d", channelCount);
+ ALOGV("EAC3 samplingRate = %d", samplingRate);
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EAC3);
+ (*metaData)->setInt32(kKeyChannelCount, channelCount);
+ (*metaData)->setInt32(kKeySampleRate, samplingRate);
+ (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ unsigned payloadSize = frmsiz + 1;
+ payloadSize <<= 1; // convert from 16-bit words to bytes
+
+ return payloadSize;
+}
+
+// Parse AC4 header assuming the current ptr is the start position of a syncframe
+// and update frameSize and metadata.
+static status_t parseAC4SyncFrame(
+ const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+ // ETSI TS 103 190-2 V1.1.1 (2015-09), Annex C
+ // The sync_word can be either 0xAC40 or 0xAC41.
+ static const int kSyncWordAC40 = 0xAC40;
+ static const int kSyncWordAC41 = 0xAC41;
+
+ size_t headerSize = 0;
+ ABitReader bits(ptr, size);
+ int32_t syncWord = bits.getBits(16);
+ if ((syncWord != kSyncWordAC40) && (syncWord != kSyncWordAC41)) {
+ ALOGE("Invalid syncword in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ headerSize += 2;
+
+ frameSize = bits.getBits(16);
+ headerSize += 2;
+ if (frameSize == 0xFFFF) {
+ frameSize = bits.getBits(24);
+ headerSize += 3;
+ }
+
+ if (frameSize == 0) {
+ ALOGE("Invalid frame size in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ frameSize += headerSize;
+ // If the sync_word is 0xAC41, a crc_word is also transmitted.
+ if (syncWord == kSyncWordAC41) {
+ frameSize += 2; // crc_word
+ }
+ ALOGV("AC4 frameSize = %u", frameSize);
+
+ // ETSI TS 103 190-2 V1.1.1 6.2.1.1
+ uint32_t bitstreamVersion = bits.getBits(2);
+ if (bitstreamVersion == 3) {
+ bitstreamVersion += readVariableBits(bits, 2);
+ }
+
+ bits.skipBits(10); // Sequence Counter
+
+ uint32_t bWaitFrames = bits.getBits(1);
+ if (bWaitFrames) {
+ uint32_t waitFrames = bits.getBits(3);
+ if (waitFrames > 0) {
+ bits.skipBits(2); // br_code;
+ }
+ }
+
+ // ETSI TS 103 190 V1.1.1 Table 82
+ bool fsIndex = bits.getBits(1);
+ uint32_t samplingRate = fsIndex ? 48000 : 44100;
+
+ if (metaData != NULL) {
+ ALOGV("dequeueAccessUnitAC4 Setting mFormat");
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+ // [FIXME] AC4 channel count is defined per presentation. Provide a default channel count
+ // as stereo for the entire stream.
+ (*metaData)->setInt32(kKeyChannelCount, 2);
+ (*metaData)->setInt32(kKeySampleRate, samplingRate);
+ }
+ return OK;
+}
+
+static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
+ return parseAC4SyncFrame(ptr, size, frameSize, NULL);
}
static bool IsSeeminglyValidADTSHeader(
@@ -279,7 +439,8 @@
ALOGE("appending data after EOS");
return ERROR_MALFORMED;
}
- if (mBuffer == NULL || mBuffer->size() == 0) {
+
+ if (!isScrambled() && (mBuffer == NULL || mBuffer->size() == 0)) {
switch (mMode) {
case H264:
case MPEG_VIDEO:
@@ -390,12 +551,19 @@
}
case AC3:
+ case EAC3:
{
uint8_t *ptr = (uint8_t *)data;
ssize_t startOffset = -1;
for (size_t i = 0; i < size; ++i) {
- if (IsSeeminglyValidAC3Header(&ptr[i], size - i)) {
+ unsigned payloadSize = 0;
+ if (mMode == AC3) {
+ payloadSize = parseAC3SyncFrame(&ptr[i], size - i, NULL);
+ } else if (mMode == EAC3) {
+ payloadSize = parseEAC3SyncFrame(&ptr[i], size - i, NULL);
+ }
+ if (payloadSize > 0) {
startOffset = i;
break;
}
@@ -406,7 +574,7 @@
}
if (startOffset > 0) {
- ALOGI("found something resembling an AC3 syncword at "
+ ALOGI("found something resembling an (E)AC3 syncword at "
"offset %zd",
startOffset);
}
@@ -416,6 +584,43 @@
break;
}
+ case AC4:
+ {
+ uint8_t *ptr = (uint8_t *)data;
+ unsigned frameSize = 0;
+ ssize_t startOffset = -1;
+
+ // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (size < 7) {
+ return ERROR_MALFORMED;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (IsSeeminglyValidAC4Header(&ptr[i], size - i, frameSize) == OK) {
+ startOffset = i;
+ break;
+ }
+ }
+
+ if (startOffset < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ if (startOffset > 0) {
+ ALOGI("found something resembling an AC4 syncword at "
+ "offset %zd",
+ startOffset);
+ }
+ if (frameSize != size - startOffset) {
+ ALOGV("AC4 frame size is %u bytes, while the buffer size is %zd bytes.",
+ frameSize, size - startOffset);
+ }
+
+ data = &ptr[startOffset];
+ size -= startOffset;
+ break;
+ }
+
case MPEG_AUDIO:
{
uint8_t *ptr = (uint8_t *)data;
@@ -494,6 +699,7 @@
void ElementaryStreamQueue::appendScrambledData(
const void *data, size_t size,
+ size_t leadingClearBytes,
int32_t keyId, bool isSync,
sp<ABuffer> clearSizes, sp<ABuffer> encSizes) {
if (!isScrambled()) {
@@ -521,6 +727,7 @@
ScrambledRangeInfo scrambledInfo;
scrambledInfo.mLength = size;
+ scrambledInfo.mLeadingClearBytes = leadingClearBytes;
scrambledInfo.mKeyId = keyId;
scrambledInfo.mIsSync = isSync;
scrambledInfo.mClearSizes = clearSizes;
@@ -533,7 +740,6 @@
sp<ABuffer> ElementaryStreamQueue::dequeueScrambledAccessUnit() {
size_t nextScan = mBuffer->size();
- mBuffer->setRange(0, 0);
int32_t pesOffset = 0, pesScramblingControl = 0;
int64_t timeUs = fetchTimestamp(nextScan, &pesOffset, &pesScramblingControl);
if (timeUs < 0ll) {
@@ -544,6 +750,7 @@
// return scrambled unit
int32_t keyId = pesScramblingControl, isSync = 0, scrambledLength = 0;
sp<ABuffer> clearSizes, encSizes;
+ size_t leadingClearBytes;
while (mScrambledRangeInfos.size() > mRangeInfos.size()) {
auto it = mScrambledRangeInfos.begin();
ALOGV("[stream %d] fetching scrambled range: size=%zu", mMode, it->mLength);
@@ -561,6 +768,7 @@
clearSizes = it->mClearSizes;
encSizes = it->mEncSizes;
isSync = it->mIsSync;
+ leadingClearBytes = it->mLeadingClearBytes;
mScrambledRangeInfos.erase(it);
}
if (scrambledLength == 0) {
@@ -568,26 +776,74 @@
return NULL;
}
- // skip the PES header, and copy the rest into scrambled access unit
- sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
- mScrambledBuffer->data() + pesOffset,
- scrambledLength - pesOffset);
-
- // fix up first sample size after skipping the PES header
- if (pesOffset > 0) {
- int32_t &firstClearSize = *(int32_t*)clearSizes->data();
- int32_t &firstEncSize = *(int32_t*)encSizes->data();
- // Cut away the PES header
- if (firstClearSize >= pesOffset) {
- // This is for TS-level scrambling, we descrambled the first
- // (or it was clear to begin with)
- firstClearSize -= pesOffset;
- } else if (firstEncSize >= pesOffset) {
- // This can only be PES-level scrambling
- firstEncSize -= pesOffset;
- }
+ // Retrieve the leading clear bytes info, and use it to set the clear
+ // range on mBuffer. Note that the leading clear bytes include the
+ // PES header portion, while mBuffer doesn't.
+ if ((int32_t)leadingClearBytes > pesOffset) {
+ mBuffer->setRange(0, leadingClearBytes - pesOffset);
+ } else {
+ mBuffer->setRange(0, 0);
}
+ // Try to parse formats, and if unavailable set up a dummy format.
+ // Only support the following modes for scrambled content for now.
+ // (will be expanded later).
+ if (mFormat == NULL) {
+ mFormat = new MetaData;
+ switch (mMode) {
+ case H264:
+ {
+ if (!MakeAVCCodecSpecificData(
+ *mFormat, mBuffer->data(), mBuffer->size())) {
+ ALOGI("Creating dummy AVC format for scrambled content");
+
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ mFormat->setInt32(kKeyWidth, 1280);
+ mFormat->setInt32(kKeyHeight, 720);
+ }
+ break;
+ }
+ case AAC:
+ {
+ if (!MakeAACCodecSpecificData(
+ *mFormat, mBuffer->data(), mBuffer->size())) {
+ ALOGI("Creating dummy AAC format for scrambled content");
+
+ MakeAACCodecSpecificData(*mFormat,
+ 1 /*profile*/, 7 /*sampling_freq_index*/, 1 /*channel_config*/);
+ mFormat->setInt32(kKeyIsADTS, true);
+ }
+
+ break;
+ }
+ case MPEG_VIDEO:
+ {
+ ALOGI("Creating dummy MPEG format for scrambled content");
+
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+ mFormat->setInt32(kKeyWidth, 1280);
+ mFormat->setInt32(kKeyHeight, 720);
+ break;
+ }
+ default:
+ {
+ ALOGE("Unknown mode for scrambled content");
+ return NULL;
+ }
+ }
+
+ // for MediaExtractor.CasInfo
+ mFormat->setInt32(kKeyCASystemID, mCASystemId);
+ mFormat->setData(kKeyCASessionID,
+ 0, mCasSessionId.data(), mCasSessionId.size());
+ }
+
+ mBuffer->setRange(0, 0);
+
+ // copy into scrambled access unit
+ sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
+ mScrambledBuffer->data(), scrambledLength);
+
scrambledAccessUnit->meta()->setInt64("timeUs", timeUs);
if (isSync) {
scrambledAccessUnit->meta()->setInt32("isSync", 1);
@@ -600,6 +856,7 @@
scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
+ scrambledAccessUnit->meta()->setInt32("pesOffset", pesOffset);
memmove(mScrambledBuffer->data(),
mScrambledBuffer->data() + scrambledLength,
@@ -614,7 +871,11 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() {
- if ((mFlags & kFlag_AlignedData) && mMode == H264 && !isScrambled()) {
+ if (isScrambled()) {
+ return dequeueScrambledAccessUnit();
+ }
+
+ if ((mFlags & kFlag_AlignedData) && mMode == H264) {
if (mRangeInfos.empty()) {
return NULL;
}
@@ -648,7 +909,10 @@
case AAC:
return dequeueAccessUnitAAC();
case AC3:
- return dequeueAccessUnitAC3();
+ case EAC3:
+ return dequeueAccessUnitEAC3();
+ case AC4:
+ return dequeueAccessUnitAC4();
case MPEG_VIDEO:
return dequeueAccessUnitMPEGVideo();
case MPEG4_VIDEO:
@@ -666,34 +930,38 @@
}
}
-sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC3() {
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitEAC3() {
unsigned syncStartPos = 0; // in bytes
unsigned payloadSize = 0;
sp<MetaData> format = new MetaData;
- ALOGV("dequeueAccessUnit_AC3[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+ ALOGV("dequeueAccessUnitEAC3[%d]: mBuffer %p(%zu)", mAUIndex,
+ mBuffer->data(), mBuffer->size());
while (true) {
if (syncStartPos + 2 >= mBuffer->size()) {
return NULL;
}
- payloadSize = parseAC3SyncFrame(
- mBuffer->data() + syncStartPos,
- mBuffer->size() - syncStartPos,
- &format);
+ uint8_t *ptr = mBuffer->data() + syncStartPos;
+ size_t size = mBuffer->size() - syncStartPos;
+ if (mMode == AC3) {
+ payloadSize = parseAC3SyncFrame(ptr, size, &format);
+ } else if (mMode == EAC3) {
+ payloadSize = parseEAC3SyncFrame(ptr, size, &format);
+ }
if (payloadSize > 0) {
break;
}
- ALOGV("dequeueAccessUnit_AC3[%d]: syncStartPos %u payloadSize %u",
+ ALOGV("dequeueAccessUnitEAC3[%d]: syncStartPos %u payloadSize %u",
mAUIndex, syncStartPos, payloadSize);
++syncStartPos;
}
if (mBuffer->size() < syncStartPos + payloadSize) {
- ALOGV("Not enough buffer size for AC3");
+ ALOGV("Not enough buffer size for E/AC3");
return NULL;
}
@@ -701,7 +969,6 @@
mFormat = format;
}
-
int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
if (timeUs < 0ll) {
ALOGE("negative timeUs");
@@ -710,7 +977,12 @@
// Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
if (mSampleDecryptor != NULL) {
- mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+ if (mMode == AC3) {
+ mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+ } else if (mMode == EAC3) {
+ ALOGE("EAC3 AU is encrypted and decryption is not supported");
+ return NULL;
+ }
}
mAUIndex++;
@@ -730,6 +1002,69 @@
return accessUnit;
}
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC4() {
+ unsigned syncStartPos = 0;
+ unsigned payloadSize = 0;
+ sp<MetaData> format = new MetaData;
+ ALOGV("dequeueAccessUnit_AC4[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
+ // A valid AC4 stream should have a minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (mBuffer->size() < 7) {
+ return NULL;
+ }
+
+ while (true) {
+ if (syncStartPos + 2 >= mBuffer->size()) {
+ return NULL;
+ }
+
+ status_t status = parseAC4SyncFrame(
+ mBuffer->data() + syncStartPos,
+ mBuffer->size() - syncStartPos,
+ payloadSize,
+ &format);
+ if (status == OK) {
+ break;
+ }
+
+ ALOGV("dequeueAccessUnit_AC4[%d]: syncStartPos %u payloadSize %u",
+ mAUIndex, syncStartPos, payloadSize);
+
+ ++syncStartPos;
+ }
+
+ if (mBuffer->size() < syncStartPos + payloadSize) {
+ ALOGV("Not enough buffer size for AC4");
+ return NULL;
+ }
+
+ if (mFormat == NULL) {
+ mFormat = format;
+ }
+
+ int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+ if (timeUs < 0ll) {
+ ALOGE("negative timeUs");
+ return NULL;
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+ accessUnit->meta()->setInt32("isSync", 1);
+
+ memmove(
+ mBuffer->data(),
+ mBuffer->data() + syncStartPos + payloadSize,
+ mBuffer->size() - syncStartPos - payloadSize);
+
+ mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+ return accessUnit;
+}
+
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
if (mBuffer->size() < 4) {
return NULL;
@@ -851,25 +1186,11 @@
bool protection_absent = bits.getBits(1) != 0;
if (mFormat == NULL) {
- unsigned profile = bits.getBits(2);
- if (profile == 3u) {
- ALOGE("profile should not be 3");
- return NULL;
- }
- unsigned sampling_freq_index = bits.getBits(4);
- bits.getBits(1); // private_bit
- unsigned channel_configuration = bits.getBits(3);
- if (channel_configuration == 0u) {
- ALOGE("channel_config should not be 0");
- return NULL;
- }
- bits.skipBits(2); // original_copy, home
-
mFormat = new MetaData;
- MakeAACCodecSpecificData(*mFormat,
- profile, sampling_freq_index, channel_configuration);
-
- mFormat->setInt32(kKeyIsADTS, true);
+ if (!MakeAACCodecSpecificData(
+ *mFormat, mBuffer->data() + offset, mBuffer->size() - offset)) {
+ return NULL;
+ }
int32_t sampleRate;
int32_t numChannels;
@@ -884,12 +1205,12 @@
ALOGI("found AAC codec config (%d Hz, %d channels)",
sampleRate, numChannels);
- } else {
- // profile_ObjectType, sampling_frequency_index, private_bits,
- // channel_configuration, original_copy, home
- bits.skipBits(12);
}
+ // profile_ObjectType, sampling_frequency_index, private_bits,
+ // channel_configuration, original_copy, home
+ bits.skipBits(12);
+
// adts_variable_header
// copyright_identification_bit, copyright_identification_start
@@ -1004,27 +1325,6 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
- if (isScrambled()) {
- if (mBuffer == NULL || mBuffer->size() == 0) {
- return NULL;
- }
- if (mFormat == NULL) {
- mFormat = new MetaData;
- if (!MakeAVCCodecSpecificData(*mFormat, mBuffer->data(), mBuffer->size())) {
- ALOGW("Creating dummy AVC format for scrambled content");
- mFormat = new MetaData;
- mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
- mFormat->setInt32(kKeyWidth, 1280);
- mFormat->setInt32(kKeyHeight, 720);
- }
- // for MediaExtractor.CasInfo
- mFormat->setInt32(kKeyCASystemID, mCASystemId);
- mFormat->setData(kKeyCASessionID, 0,
- mCasSessionId.data(), mCasSessionId.size());
- }
- return dequeueScrambledAccessUnit();
- }
-
const uint8_t *data = mBuffer->data();
size_t size = mBuffer->size();
@@ -1324,25 +1624,6 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEGVideo() {
- if (isScrambled()) {
- if (mBuffer == NULL || mBuffer->size() == 0) {
- return NULL;
- }
- if (mFormat == NULL) {
- ALOGI("Creating dummy MPEG format for scrambled content");
- mFormat = new MetaData;
- mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
- mFormat->setInt32(kKeyWidth, 1280);
- mFormat->setInt32(kKeyHeight, 720);
-
- // for MediaExtractor.CasInfo
- mFormat->setInt32(kKeyCASystemID, mCASystemId);
- mFormat->setData(kKeyCASessionID, 0,
- mCasSessionId.data(), mCasSessionId.size());
- }
- return dequeueScrambledAccessUnit();
- }
-
const uint8_t *data = mBuffer->data();
size_t size = mBuffer->size();
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index ffcb502..3227f47 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -38,6 +38,8 @@
H264,
AAC,
AC3,
+ EAC3,
+ AC4,
MPEG_AUDIO,
MPEG_VIDEO,
MPEG4_VIDEO,
@@ -59,6 +61,7 @@
void appendScrambledData(
const void *data, size_t size,
+ size_t leadingClearBytes,
int32_t keyId, bool isSync,
sp<ABuffer> clearSizes, sp<ABuffer> encSizes);
@@ -84,8 +87,8 @@
};
struct ScrambledRangeInfo {
- //int64_t mTimestampUs;
size_t mLength;
+ size_t mLeadingClearBytes;
int32_t mKeyId;
int32_t mIsSync;
sp<ABuffer> mClearSizes;
@@ -115,7 +118,8 @@
sp<ABuffer> dequeueAccessUnitH264();
sp<ABuffer> dequeueAccessUnitAAC();
- sp<ABuffer> dequeueAccessUnitAC3();
+ sp<ABuffer> dequeueAccessUnitEAC3();
+ sp<ABuffer> dequeueAccessUnitAC4();
sp<ABuffer> dequeueAccessUnitMPEGAudio();
sp<ABuffer> dequeueAccessUnitMPEGVideo();
sp<ABuffer> dequeueAccessUnitMPEG4Video();
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 935dc34..672a37c 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -154,12 +154,12 @@
outDef->format.video.nFrameWidth = outputBufferWidth();
outDef->format.video.nFrameHeight = outputBufferHeight();
outDef->format.video.eColorFormat = mOutputFormat;
- outDef->format.video.nStride = outDef->format.video.nFrameWidth;
outDef->format.video.nSliceHeight = outDef->format.video.nFrameHeight;
int32_t bpp = (mOutputFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
+ outDef->format.video.nStride = outDef->format.video.nFrameWidth * bpp;
outDef->nBufferSize =
- (outDef->format.video.nStride * outDef->format.video.nSliceHeight * bpp * 3) / 2;
+ (outDef->format.video.nStride * outDef->format.video.nSliceHeight * 3) / 2;
OMX_PARAM_PORTDEFINITIONTYPE *inDef = &editPortInfo(kInputPortIndex)->mDef;
inDef->format.video.nFrameWidth = mWidth;
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index d459cbd..665d51a 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -24,7 +24,7 @@
#include <media/MediaHTTPConnection.h>
#include <media/MediaHTTPService.h>
-#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/ClearMediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/Utils.h>
@@ -41,7 +41,7 @@
mFlags(flags),
mNetLooper(new ALooper),
mCancelled(false),
- mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())) {
+ mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())) {
mNetLooper->setName("sdp net");
mNetLooper->start(false /* runOnCallingThread */,
false /* canCallJava */,
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6d10f1c..5597488 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -309,6 +309,7 @@
}
String8 defaultUrl;
DrmPlugin::KeyRequestType keyRequestType;
+ mObj->mKeyRequest.clear();
status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
&keyRequestType);
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index d6dae5b..f5b3f92 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -21,20 +21,31 @@
"MemoryLeakTrackUtil.cpp",
"ProcessInfo.cpp",
"SchedulingPolicyService.cpp",
+ "ServiceUtilities.cpp",
+ "TimeCheck.cpp",
],
shared_libs: [
"libbinder",
+ "libcutils",
"liblog",
"libutils",
"libmemunreachable",
],
+ logtags: ["EventLogTags.logtags"],
+
cflags: [
"-Wall",
"-Wextra",
"-Werror",
],
+ product_variables: {
+ product_is_iot: {
+ cflags: ["-DTARGET_ANDROID_THINGS"],
+ },
+ },
+
local_include_dirs: ["include"],
export_include_dirs: ["include"],
}
diff --git a/media/utils/EventLogTags.logtags b/media/utils/EventLogTags.logtags
new file mode 100644
index 0000000..67f0ea8
--- /dev/null
+++ b/media/utils/EventLogTags.logtags
@@ -0,0 +1,41 @@
+# The entries in this file map a sparse set of log tag numbers to tag names.
+# This is installed on the device, in /system/etc, and parsed by logcat.
+#
+# Tag numbers are decimal integers, from 0 to 2^31. (Let's leave the
+# negative values alone for now.)
+#
+# Tag names are one or more ASCII letters and numbers or underscores, i.e.
+# "[A-Z][a-z][0-9]_". Do not include spaces or punctuation (the former
+# impacts log readability, the latter makes regex searches more annoying).
+#
+# Tag numbers and names are separated by whitespace. Blank lines and lines
+# starting with '#' are ignored.
+#
+# Optionally, a description of the value(s) of the tag can be put after the
+# tag name. Descriptions are in the format
+# (<name>|data type[|data unit])
+# Multiple values are separated by commas.
+#
+# The data type is a number from the following values:
+# 1: int
+# 2: long
+# 3: string
+# 4: list
+#
+# The data unit is a number taken from the following list:
+# 1: Number of objects
+# 2: Number of bytes
+# 3: Number of milliseconds
+# 4: Number of allocations
+# 5: Id
+# 6: Percent
+# Default value for data of type int/long is 2 (bytes).
+#
+# See system/core/logcat/event.logtags for the master copy of the tags.
+
+# 61000 - 61199 reserved for audioserver
+
+61000 audioserver_binder_timeout (command|3)
+
+# NOTE - the range 1000000-2000000 is reserved for partners and others who
+# want to define their own log tags without conflicting with the core platform.
diff --git a/services/audioflinger/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
similarity index 76%
rename from services/audioflinger/ServiceUtilities.cpp
rename to media/utils/ServiceUtilities.cpp
index aa267ea..1c54aec 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -14,12 +14,13 @@
* limitations under the License.
*/
+#define LOG_TAG "ServiceUtilities"
+
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/PermissionCache.h>
-#include <private/android_filesystem_config.h>
-#include "ServiceUtilities.h"
+#include "mediautils/ServiceUtilities.h"
/* When performing permission checks we do not use permission cache for
* runtime permissions (protection level dangerous) as they may change at
@@ -32,24 +33,6 @@
static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
-// Not valid until initialized by AudioFlinger constructor. It would have to be
-// re-initialized if the process containing AudioFlinger service forks (which it doesn't).
-// This is often used to validate binder interface calls within audioserver
-// (e.g. AudioPolicyManager to AudioFlinger).
-pid_t getpid_cached;
-
-// A trusted calling UID may specify the client UID as part of a binder interface call.
-// otherwise the calling UID must be equal to the client UID.
-bool isTrustedCallingUid(uid_t uid) {
- switch (uid) {
- case AID_MEDIA:
- case AID_AUDIOSERVER:
- return true;
- default:
- return false;
- }
-}
-
static String16 resolveCallingPackage(PermissionController& permissionController,
const String16& opPackageName, uid_t uid) {
if (opPackageName.size() > 0) {
@@ -71,16 +54,11 @@
return packages[0];
}
-static inline bool isAudioServerOrRoot(uid_t uid) {
- // AID_ROOT is OK for command-line tests. Native unforked audioserver always OK.
- return uid == AID_ROOT || uid == AID_AUDIOSERVER ;
-}
-
static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
uid_t uid, bool start) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
- if (isAudioServerOrRoot(uid)) return true;
+ if (isAudioServerOrRootUid(uid)) return true;
// We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
// may open a record track on behalf of a client. Note that pid may be a tid.
@@ -127,7 +105,7 @@
void finishRecording(const String16& opPackageName, uid_t uid) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
- if (isAudioServerOrRoot(uid)) return;
+ if (isAudioServerOrRootUid(uid)) return;
PermissionController permissionController;
String16 resolvedOpPackageName = resolveCallingPackage(
@@ -142,7 +120,7 @@
}
bool captureAudioOutputAllowed(pid_t pid, uid_t uid) {
- if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+ if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
@@ -163,7 +141,8 @@
}
bool settingsAllowed() {
- if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+ // given this is a permission check, could this be isAudioServerOrRootUid()?
+ if (isAudioServerUid(IPCThreadState::self()->getCallingUid())) return true;
static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
bool ok = PermissionCache::checkCallingPermission(sAudioSettings);
@@ -179,8 +158,28 @@
return ok;
}
+bool modifyDefaultAudioEffectsAllowed() {
+ static const String16 sModifyDefaultAudioEffectsAllowed(
+ "android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+ // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+ bool ok = PermissionCache::checkCallingPermission(sModifyDefaultAudioEffectsAllowed);
+
+#ifdef TARGET_ANDROID_THINGS
+ if (!ok) {
+ // Use a secondary permission on Android Things to allow a more lenient level of protection.
+ static const String16 sModifyDefaultAudioEffectsAndroidThingsAllowed(
+ "com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+ ok = PermissionCache::checkCallingPermission(
+ sModifyDefaultAudioEffectsAndroidThingsAllowed);
+ }
+ if (!ok) ALOGE("com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#else
+ if (!ok) ALOGE("android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
+#endif
+ return ok;
+}
+
bool dumpAllowed() {
- // don't optimize for same pid, since mediaserver never dumps itself
static const String16 sDump("android.permission.DUMP");
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
bool ok = PermissionCache::checkCallingPermission(sDump);
@@ -196,4 +195,29 @@
return ok;
}
+status_t checkIMemory(const sp<IMemory>& iMemory)
+{
+ if (iMemory == 0) {
+ ALOGE("%s check failed: NULL IMemory pointer", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ sp<IMemoryHeap> heap = iMemory->getMemory();
+ if (heap == 0) {
+ ALOGE("%s check failed: NULL heap pointer", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ off_t size = lseek(heap->getHeapID(), 0, SEEK_END);
+ lseek(heap->getHeapID(), 0, SEEK_SET);
+
+ if (iMemory->pointer() == NULL || size < (off_t)iMemory->size()) {
+ ALOGE("%s check failed: pointer %p size %zu fd size %u",
+ __FUNCTION__, iMemory->pointer(), iMemory->size(), (uint32_t)size);
+ return BAD_VALUE;
+ }
+
+ return NO_ERROR;
+}
+
} // namespace android
diff --git a/media/libmedia/TimeCheck.cpp b/media/utils/TimeCheck.cpp
similarity index 91%
rename from media/libmedia/TimeCheck.cpp
rename to media/utils/TimeCheck.cpp
index dab5d4f..59cf4ef 100644
--- a/media/libmedia/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -15,7 +15,9 @@
*/
+#include <utils/Log.h>
#include <media/TimeCheck.h>
+#include <media/EventLog.h>
namespace android {
@@ -81,7 +83,10 @@
status = mCond.waitRelative(mMutex, waitTimeNs);
}
}
- LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "TimeCheck timeout for %s", tag);
+ if (status != NO_ERROR) {
+ LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+ LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+ }
return true;
}
diff --git a/media/utils/include/mediautils/EventLog.h b/media/utils/include/mediautils/EventLog.h
new file mode 100644
index 0000000..553d3bd
--- /dev/null
+++ b/media/utils/include/mediautils/EventLog.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AUDIO_EVENT_LOG_H_
+#define ANDROID_AUDIO_EVENT_LOG_H_
+
+namespace android {
+
+// keep values in sync with frameworks/av/media/utils/EventLogTags.logtags
+enum {
+ LOGTAG_AUDIO_BINDER_TIMEOUT = 61000,
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_EVENT_LOG_H_
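Note: with the TimeCheck.cpp change above, a watchdog that expires now writes event-log tag 61000 (LOGTAG_AUDIO_BINDER_TIMEOUT) with the offending tag string before aborting. A sketch of the typical scoped usage, assuming the TimeCheck API declared in media/TimeCheck.h (illustration only, not part of this patch):

    #include <media/TimeCheck.h>

    status_t someAudioServerBinderCall() {
        TimeCheck check("someAudioServerBinderCall"); // watchdog armed for this scope
        // ... potentially blocking HAL or binder work ...
        return NO_ERROR;
    } // watchdog disarmed here; had the scope overrun its timeout, the monitor
      // thread would have logged LOGTAG_AUDIO_BINDER_TIMEOUT and called LOG_ALWAYS_FATAL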
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
new file mode 100644
index 0000000..98f54c2
--- /dev/null
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include <binder/IMemory.h>
+#include <binder/PermissionController.h>
+#include <cutils/multiuser.h>
+#include <private/android_filesystem_config.h>
+
+namespace android {
+
+// Audio permission utilities
+
+// Used for calls that should originate from system services.
+// We allow that some services might have separate processes to
+// handle multiple users, e.g. u10_system, u10_bluetooth, u10_radio.
+static inline bool isServiceUid(uid_t uid) {
+ return multiuser_get_app_id(uid) < AID_APP_START;
+}
+
+// Used for calls that should originate from audioserver.
+static inline bool isAudioServerUid(uid_t uid) {
+ return uid == AID_AUDIOSERVER;
+}
+
+// Used for some permission checks.
+// AID_ROOT is OK for command-line tests. Native audioserver always OK.
+static inline bool isAudioServerOrRootUid(uid_t uid) {
+ return uid == AID_AUDIOSERVER || uid == AID_ROOT;
+}
+
+// Used for calls that should come from system server or internal.
+// Note: system server is multiprocess for multiple users. audioserver is not.
+static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
+ return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
+}
+
+// Mediaserver may forward the client PID and UID as part of a binder interface call;
+// otherwise the calling UID must be equal to the client UID.
+static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
+ switch (uid) {
+ case AID_MEDIA:
+ case AID_AUDIOSERVER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
+void finishRecording(const String16& opPackageName, uid_t uid);
+bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
+bool captureHotwordAllowed(pid_t pid, uid_t uid);
+bool settingsAllowed();
+bool modifyAudioRoutingAllowed();
+bool modifyDefaultAudioEffectsAllowed();
+bool dumpAllowed();
+bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
+status_t checkIMemory(const sp<IMemory>& iMemory);
+}
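Note: a worked example for the multi-user comment on isServiceUid() above (constants taken from private/android_filesystem_config.h and cutils/multiuser.h; for illustration only):

    // u10_system runs as uid 10 * AID_USER_OFFSET + AID_SYSTEM = 1001000.
    // multiuser_get_app_id(1001000) == 1000 (AID_SYSTEM), which is < AID_APP_START (10000),
    // so isServiceUid(1001000) is true. A regular app uid such as 1010123 maps to
    // app id 10123 and is rejected; audioserver (AID_AUDIOSERVER == 1041) and
    // root (AID_ROOT == 0) are accepted by isAudioServerOrRootUid().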
diff --git a/media/libmedia/include/media/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
similarity index 100%
rename from media/libmedia/include/media/TimeCheck.h
rename to media/utils/include/mediautils/TimeCheck.h
diff --git a/packages/MediaComponents/Android.mk b/packages/MediaComponents/Android.mk
index def9dc5..55a5424 100644
--- a/packages/MediaComponents/Android.mk
+++ b/packages/MediaComponents/Android.mk
@@ -42,7 +42,7 @@
#
#LOCAL_MULTILIB := first
#
-#LOCAL_JAVA_LIBRARIES += android-support-annotations
+#LOCAL_JAVA_LIBRARIES += androidx.annotation_annotation
#
## To embed native libraries in package, uncomment the lines below.
##LOCAL_MODULE_TAGS := samples
@@ -60,9 +60,9 @@
#
## TODO: Remove dependency with other support libraries.
#LOCAL_STATIC_ANDROID_LIBRARIES += \
-# android-support-v4 \
-# android-support-v7-appcompat \
-# android-support-v7-palette
+# androidx.legacy_legacy-support-v4 \
+# androidx.appcompat_appcompat \
+# androidx.palette_palette
#LOCAL_USE_AAPT2 := true
#
#include $(BUILD_PACKAGE)
diff --git a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
index b304471..f6f7be5 100644
--- a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
@@ -169,7 +169,7 @@
android:layout_height="wrap_content"
android:fillViewport="true"
android:scrollIndicators="top|bottom">
- <android.support.v7.widget.ButtonBarLayout
+ <androidx.appcompat.widget.ButtonBarLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:gravity="bottom"
@@ -184,7 +184,7 @@
style="?android:attr/buttonBarNeutralButtonStyle"
android:layout_width="wrap_content"
android:layout_height="wrap_content"/>
- <android.support.v4.widget.Space
+ <androidx.legacy.widget.Space
android:id="@+id/spacer"
android:layout_width="0dp"
android:layout_height="0dp"
@@ -200,7 +200,7 @@
style="?android:attr/buttonBarPositiveButtonStyle"
android:layout_width="wrap_content"
android:layout_height="wrap_content"/>
- </android.support.v7.widget.ButtonBarLayout>
+ </androidx.appcompat.widget.ButtonBarLayout>
</ScrollView>
</LinearLayout>
</FrameLayout>
diff --git a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
index a89058b..12d85ae 100644
--- a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
@@ -40,7 +40,7 @@
android:layout_marginBottom="8dp"
android:scaleType="fitCenter"
android:src="?attr/mediaRouteAudioTrackDrawable" />
- <android.support.v7.app.MediaRouteVolumeSlider
+ <androidx.mediarouter.app.MediaRouteVolumeSlider
android:id="@+id/mr_volume_slider"
android:layout_width="fill_parent"
android:layout_height="40dp"
diff --git a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
index c909099..0327beb 100644
--- a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
@@ -19,7 +19,6 @@
import android.content.Context;
import android.media.MediaBrowser2;
import android.media.MediaBrowser2.BrowserCallback;
-import android.media.MediaController2;
import android.media.MediaItem2;
import android.media.SessionToken2;
import android.media.update.MediaBrowser2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
index 249365a..2883087 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
@@ -16,7 +16,6 @@
package com.android.media;
-import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_ADD_ITEM;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REMOVE_ITEM;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REPLACE_ITEM;
@@ -30,6 +29,7 @@
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID;
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH;
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_URI;
+import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
import android.app.PendingIntent;
import android.content.ComponentName;
@@ -44,11 +44,11 @@
import android.media.MediaMetadata2;
import android.media.MediaPlaylistAgent.RepeatMode;
import android.media.MediaPlaylistAgent.ShuffleMode;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSessionService2;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.update.MediaController2Provider;
import android.net.Uri;
@@ -58,10 +58,11 @@
import android.os.RemoteException;
import android.os.ResultReceiver;
import android.os.UserHandle;
-import android.support.annotation.GuardedBy;
import android.text.TextUtils;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executor;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
index 2cfc5df..ece4a00 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
@@ -21,8 +21,8 @@
import android.media.MediaController2;
import android.media.MediaItem2;
import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommand2;
import android.media.SessionCommandGroup2;
import android.os.Bundle;
import android.os.ResultReceiver;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
index 4ec6042..72ecf54 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
@@ -44,13 +44,13 @@
import android.media.MediaPlaylistAgent.PlaylistEventCallback;
import android.media.MediaSession2;
import android.media.MediaSession2.Builder;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.MediaSession2.OnDataSourceMissingHelper;
import android.media.MediaSession2.SessionCallback;
import android.media.MediaSessionService2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.VolumeProvider2;
import android.media.session.MediaSessionManager;
@@ -60,10 +60,11 @@
import android.os.Parcelable;
import android.os.Process;
import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
import android.text.TextUtils;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
import java.lang.ref.WeakReference;
import java.lang.reflect.Field;
import java.util.ArrayList;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
index ec657d7..11ccd9f 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
@@ -22,11 +22,11 @@
import android.media.MediaItem2;
import android.media.MediaLibraryService2.LibraryRoot;
import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.VolumeProvider2;
import android.net.Uri;
import android.os.Binder;
@@ -35,13 +35,14 @@
import android.os.IBinder;
import android.os.RemoteException;
import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
-import android.support.annotation.NonNull;
import android.text.TextUtils;
import android.util.ArrayMap;
import android.util.Log;
import android.util.SparseArray;
+import androidx.annotation.GuardedBy;
+import androidx.annotation.NonNull;
+
import com.android.media.MediaLibraryService2Impl.MediaLibrarySessionImpl;
import com.android.media.MediaSession2Impl.CommandButtonImpl;
import com.android.media.MediaSession2Impl.CommandGroupImpl;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
index c33eb65..d975839 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
@@ -20,7 +20,6 @@
import android.app.Notification;
import android.app.NotificationManager;
-import android.content.Context;
import android.content.Intent;
import android.media.MediaPlayerBase;
import android.media.MediaPlayerBase.PlayerEventCallback;
@@ -31,9 +30,10 @@
import android.media.SessionToken2.TokenType;
import android.media.update.MediaSessionService2Provider;
import android.os.IBinder;
-import android.support.annotation.GuardedBy;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
// TODO(jaewan): Need a test for session service itself.
public class MediaSessionService2Impl implements MediaSessionService2Provider {
diff --git a/packages/MediaComponents/src/com/android/media/Rating2Impl.java b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
index d558129..e2b9f0a 100644
--- a/packages/MediaComponents/src/com/android/media/Rating2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
@@ -18,7 +18,6 @@
import static android.media.Rating2.*;
-import android.content.Context;
import android.media.Rating2;
import android.media.Rating2.Style;
import android.media.update.Rating2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/RoutePlayer.java b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
index 9450d34..ebff0e2 100644
--- a/packages/MediaComponents/src/com/android/media/RoutePlayer.java
+++ b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
@@ -23,7 +23,8 @@
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
-import android.support.annotation.RequiresApi;
+
+import androidx.annotation.RequiresApi;
import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaRouter;
@@ -33,8 +34,6 @@
import com.android.support.mediarouter.media.RemotePlaybackClient.SessionActionCallback;
import com.android.support.mediarouter.media.RemotePlaybackClient.StatusCallback;
-import java.util.Map;
-
@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
public class RoutePlayer extends MediaSession.Callback {
public static final long PLAYBACK_ACTIONS = PlaybackState.ACTION_PAUSE
diff --git a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
index a5cf8c4..f792712 100644
--- a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
@@ -16,9 +16,9 @@
package com.android.media;
+import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
import static android.media.SessionToken2.TYPE_SESSION;
import static android.media.SessionToken2.TYPE_SESSION_SERVICE;
-import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
import android.content.Context;
import android.content.Intent;
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
index a4d55d7..97d3927 100644
--- a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
+++ b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
@@ -16,12 +16,8 @@
package com.android.media.subtitle;
-import java.util.Locale;
-import java.util.Vector;
-
import android.content.Context;
import android.media.MediaFormat;
-import android.media.MediaPlayer2;
import android.media.MediaPlayer2.TrackInfo;
import android.os.Handler;
import android.os.Looper;
@@ -30,6 +26,9 @@
import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
+import java.util.Locale;
+import java.util.Vector;
+
// Note: This is forked from android.media.SubtitleController since P
/**
* The subtitle controller provides the architecture to display subtitles for a
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
index d7be549..f75b75e 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
@@ -31,13 +31,13 @@
import android.media.MediaMetadata2;
import android.media.MediaPlaylistAgent;
import android.media.MediaSession2;
-import android.media.SessionCommand2;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.MediaSession2.SessionCallback;
import android.media.MediaSessionService2;
import android.media.MediaSessionService2.MediaNotification;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.VolumeProvider2;
import android.media.update.MediaBrowser2Provider;
@@ -59,11 +59,12 @@
import android.media.update.VolumeProvider2Provider;
import android.os.Bundle;
import android.os.IInterface;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.widget.MediaControlView2;
import android.widget.VideoView2;
+import androidx.annotation.Nullable;
+
import com.android.media.IMediaController2;
import com.android.media.MediaBrowser2Impl;
import com.android.media.MediaController2Impl;
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
index ad8bb48..dc5e5e2 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
@@ -18,21 +18,21 @@
import android.annotation.Nullable;
import android.content.Context;
-import android.content.ContextWrapper;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager.NameNotFoundException;
import android.content.res.Resources;
import android.content.res.Resources.Theme;
import android.content.res.XmlResourceParser;
-import android.support.annotation.GuardedBy;
-import android.support.v4.widget.Space;
-import android.support.v7.widget.ButtonBarLayout;
import android.util.AttributeSet;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
+import androidx.annotation.GuardedBy;
+import androidx.appcompat.widget.ButtonBarLayout;
+import androidx.legacy.widget.Space;
+
import com.android.support.mediarouter.app.MediaRouteButton;
import com.android.support.mediarouter.app.MediaRouteExpandCollapseButton;
import com.android.support.mediarouter.app.MediaRouteVolumeSlider;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
index d3e8d47..98c0d17 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
@@ -19,11 +19,12 @@
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.content.Context;
-import android.support.v4.view.ActionProvider;
import android.util.Log;
import android.view.View;
import android.view.ViewGroup;
+import androidx.core.view.ActionProvider;
+
import com.android.support.mediarouter.media.MediaRouteSelector;
import com.android.support.mediarouter.media.MediaRouter;
@@ -48,7 +49,7 @@
* <h3>Prerequisites</h3>
* <p>
* To use the media route action provider, the activity must be a subclass of
- * {@link AppCompatActivity} from the <code>android.support.v7.appcompat</code>
+ * {@link AppCompatActivity} from the <code>androidx.appcompat.appcompat</code>
* support library. Refer to support library documentation for details.
* </p>
*
@@ -65,7 +66,7 @@
* <item android:id="@+id/media_route_menu_item"
* android:title="@string/media_route_menu_title"
* app:showAsAction="always"
- * app:actionProviderClass="android.support.v7.app.MediaRouteActionProvider"/>
+ * app:actionProviderClass="androidx.mediarouter.app.MediaRouteActionProvider"/>
* </menu>
* </pre><p>
* Then configure the menu and set the route selector for the chooser.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
index fde8a63..e82fcb9 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
@@ -28,14 +28,15 @@
import android.graphics.drawable.AnimationDrawable;
import android.graphics.drawable.Drawable;
import android.os.AsyncTask;
-import android.support.v4.graphics.drawable.DrawableCompat;
-import android.support.v7.widget.TooltipCompat;
import android.util.AttributeSet;
import android.util.Log;
import android.util.SparseArray;
import android.view.SoundEffectConstants;
import android.view.View;
+import androidx.appcompat.widget.TooltipCompat;
+import androidx.core.graphics.drawable.DrawableCompat;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
import com.android.support.mediarouter.media.MediaRouteSelector;
@@ -70,7 +71,7 @@
* <h3>Prerequisites</h3>
* <p>
* To use the media route button, the activity must be a subclass of
- * {@link FragmentActivity} from the <code>android.support.v4</code>
+ * {@link FragmentActivity} from the <code>androidx.core</code>
* support library. Refer to support library documentation for details.
* </p>
*
@@ -81,9 +82,9 @@
private static final String TAG = "MediaRouteButton";
private static final String CHOOSER_FRAGMENT_TAG =
- "android.support.v7.mediarouter:MediaRouteChooserDialogFragment";
+ "androidx.mediarouter.media.outer:MediaRouteChooserDialogFragment";
private static final String CONTROLLER_FRAGMENT_TAG =
- "android.support.v7.mediarouter:MediaRouteControllerDialogFragment";
+ "androidx.mediarouter.media.outer:MediaRouteControllerDialogFragment";
private final MediaRouter mRouter;
private final MediaRouterCallback mCallback;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
index cac64d9..f24028a 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
@@ -16,13 +16,14 @@
package com.android.support.mediarouter.app;
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTED;
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTING;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+ .CONNECTION_STATE_CONNECTED;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+ .CONNECTION_STATE_CONNECTING;
import android.annotation.NonNull;
import android.app.Dialog;
import android.content.Context;
-import android.content.res.Resources;
import android.content.res.TypedArray;
import android.graphics.drawable.Drawable;
import android.net.Uri;
@@ -30,12 +31,10 @@
import android.os.Handler;
import android.os.Message;
import android.os.SystemClock;
-import android.support.v7.app.AppCompatDialog;
import android.text.TextUtils;
import android.util.Log;
import android.view.ContextThemeWrapper;
import android.view.Gravity;
-import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
index 060cfca..f6c1d2f 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
@@ -40,9 +40,6 @@
import android.support.v4.media.session.MediaControllerCompat;
import android.support.v4.media.session.MediaSessionCompat;
import android.support.v4.media.session.PlaybackStateCompat;
-import android.support.v4.util.ObjectsCompat;
-import android.support.v4.view.accessibility.AccessibilityEventCompat;
-import android.support.v7.graphics.Palette;
import android.text.TextUtils;
import android.util.Log;
import android.view.ContextThemeWrapper;
@@ -72,11 +69,15 @@
import android.widget.SeekBar;
import android.widget.TextView;
+import androidx.core.util.ObjectsCompat;
+import androidx.core.view.accessibility.AccessibilityEventCompat;
+import androidx.palette.graphics.Palette;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
+import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
import com.android.support.mediarouter.media.MediaRouteSelector;
import com.android.support.mediarouter.media.MediaRouter;
-import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
import java.io.BufferedInputStream;
import java.io.IOException;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
index a9eaf39..b5ee63e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
@@ -16,7 +16,7 @@
package com.android.support.mediarouter.app;
-import android.support.annotation.NonNull;
+import androidx.annotation.NonNull;
/**
* The media route dialog factory is responsible for creating the media route
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
index 02ee118..52aecd88 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
@@ -17,10 +17,11 @@
package com.android.support.mediarouter.app;
import android.os.Bundle;
-import android.support.v4.app.Fragment;
-import com.android.support.mediarouter.media.MediaRouter;
+import androidx.fragment.app.Fragment;
+
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
/**
* Media route discovery fragment.
@@ -34,7 +35,7 @@
* provide the {@link MediaRouter} callback to register.
* </p><p>
* Note that the discovery callback makes the application be connected with all the
- * {@link android.support.v7.media.MediaRouteProviderService media route provider services}
+ * {@link androidx.mediarouter.media.MediaRouteProviderService media route provider services}
* while it is registered.
* </p>
*/
@@ -114,7 +115,7 @@
}
/**
- * Called to create the {@link android.support.v7.media.MediaRouter.Callback callback}
+ * Called to create the {@link androidx.mediarouter.media.MediaRouter.Callback callback}
* that will be registered.
* <p>
* The default callback does nothing. The application may override this method to
@@ -129,7 +130,7 @@
/**
* Called to prepare the callback flags that will be used when the
- * {@link android.support.v7.media.MediaRouter.Callback callback} is registered.
+ * {@link androidx.mediarouter.media.MediaRouter.Callback callback} is registered.
* <p>
* The default implementation returns {@link MediaRouter#CALLBACK_FLAG_REQUEST_DISCOVERY}.
* </p>
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
index 6a0a95a..dcca6a0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
@@ -21,7 +21,6 @@
import android.graphics.PorterDuff;
import android.graphics.PorterDuffColorFilter;
import android.graphics.drawable.AnimationDrawable;
-import android.support.v4.content.ContextCompat;
import android.util.AttributeSet;
import android.view.View;
import android.widget.ImageButton;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
index 63f042f..b4bf8d1 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
@@ -19,12 +19,13 @@
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
-import android.support.annotation.IntDef;
-import android.support.v4.graphics.ColorUtils;
import android.util.TypedValue;
import android.view.ContextThemeWrapper;
import android.view.View;
+import androidx.annotation.IntDef;
+import androidx.core.graphics.ColorUtils;
+
import com.android.media.update.R;
import java.lang.annotation.Retention;
@@ -170,7 +171,7 @@
private static boolean isLightTheme(Context context) {
TypedValue value = new TypedValue();
// TODO(sungsoo): Switch to com.android.internal.R.attr.isLightTheme
- return context.getTheme().resolveAttribute(android.support.v7.appcompat.R.attr.isLightTheme,
+ return context.getTheme().resolveAttribute(androidx.appcompat.R.attr.isLightTheme,
value, true) && value.data != 0;
}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
index f8539bd..5a0bc95 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
@@ -20,7 +20,6 @@
import android.hardware.display.DisplayManager;
import android.os.Build;
import android.os.Handler;
-import android.support.annotation.RequiresApi;
import android.util.Log;
import android.view.Display;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
index 90ea2d5..92f608b 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
@@ -19,7 +19,8 @@
import android.app.PendingIntent;
import android.os.Bundle;
import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
/**
* Describes the playback status of a media item.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
index 91a2e1a..7ea328c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
@@ -23,7 +23,8 @@
import android.content.Intent;
import android.os.Handler;
import android.os.Message;
-import android.support.v4.util.ObjectsCompat;
+
+import androidx.core.util.ObjectsCompat;
import com.android.support.mediarouter.media.MediaRouter.ControlRequestCallback;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
index 43cde10..a186fee 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
@@ -29,12 +29,14 @@
.CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_DISCOVERY_REQUEST;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNREGISTER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UNSELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
@@ -51,9 +53,12 @@
.SERVICE_MSG_GENERIC_FAILURE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_CURRENT;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .isValidRemoteMessenger;
import android.app.Service;
import android.content.Intent;
@@ -65,11 +70,12 @@
import android.os.Message;
import android.os.Messenger;
import android.os.RemoteException;
-import android.support.annotation.VisibleForTesting;
-import android.support.v4.util.ObjectsCompat;
import android.util.Log;
import android.util.SparseArray;
+import androidx.annotation.VisibleForTesting;
+import androidx.core.util.ObjectsCompat;
+
import java.lang.ref.WeakReference;
import java.util.ArrayList;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
index 5669b19..f20dcc0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
@@ -17,8 +17,9 @@
import android.content.IntentFilter;
import android.os.Bundle;
-import android.support.annotation.NonNull;
-import android.support.annotation.Nullable;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
index db0052e..4b56b19 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
@@ -33,15 +33,16 @@
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
-import android.support.v4.app.ActivityManagerCompat;
-import android.support.v4.hardware.display.DisplayManagerCompat;
-import android.support.v4.media.VolumeProviderCompat;
import android.support.v4.media.session.MediaSessionCompat;
-import android.support.v4.util.Pair;
import android.text.TextUtils;
import android.util.Log;
import android.view.Display;
+import androidx.core.app.ActivityManagerCompat;
+import androidx.core.hardware.display.DisplayManagerCompat;
+import androidx.core.util.Pair;
+import androidx.media.VolumeProviderCompat;
+
import com.android.support.mediarouter.media.MediaRouteProvider.ProviderMetadata;
import com.android.support.mediarouter.media.MediaRouteProvider.RouteController;
@@ -81,13 +82,13 @@
static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the reason the route
* was unselected is unknown.
*/
public static final int UNSELECT_REASON_UNKNOWN = 0;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
* the disconnect button to disconnect and keep playing.
* <p>
@@ -96,13 +97,13 @@
*/
public static final int UNSELECT_REASON_DISCONNECTED = 1;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
* the stop casting button.
*/
public static final int UNSELECT_REASON_STOPPED = 2;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user selected
* a different route.
*/
@@ -174,7 +175,7 @@
* Applications should typically add a callback using this flag in the
* {@link android.app.Activity activity's} {@link android.app.Activity#onStart onStart}
* method and remove it in the {@link android.app.Activity#onStop onStop} method.
- * The {@link android.support.v7.app.MediaRouteDiscoveryFragment} fragment may
+ * The {@link androidx.mediarouter.app.MediaRouteDiscoveryFragment} fragment may
* also be used for this purpose.
* </p><p class="note">
* On {@link ActivityManager#isLowRamDevice low-RAM devices} this flag
@@ -182,7 +183,7 @@
* {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
* </p>
*
- * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
*/
public static final int CALLBACK_FLAG_REQUEST_DISCOVERY = 1 << 2;
@@ -197,7 +198,7 @@
* {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
* </p>
*
- * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
*/
public static final int CALLBACK_FLAG_FORCE_DISCOVERY = 1 << 3;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
index 3206596..0e7514c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
@@ -19,7 +19,8 @@
import android.app.PendingIntent;
import android.os.Bundle;
import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
/**
* Describes the playback status of a media session.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
index 98e4e28..eacf1c8 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
@@ -29,17 +29,20 @@
.CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_DISCOVERY_REQUEST;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNREGISTER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UNSELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UPDATE_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_VERSION_CURRENT;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_DATA_ERROR;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_CONTROL_REQUEST_FAILED;
@@ -51,9 +54,11 @@
.SERVICE_MSG_GENERIC_FAILURE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_REGISTERED;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_1;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .isValidRemoteMessenger;
import android.annotation.NonNull;
import android.content.ComponentName;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
index 826449b..65c5518 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
@@ -18,7 +18,6 @@
import android.content.Context;
import android.media.AudioManager;
import android.os.Build;
-import android.support.annotation.RequiresApi;
import java.lang.ref.WeakReference;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
index f6e1497..e76564e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
@@ -22,9 +22,10 @@
import android.content.IntentFilter;
import android.net.Uri;
import android.os.Bundle;
-import android.support.v4.util.ObjectsCompat;
import android.util.Log;
+import androidx.core.util.ObjectsCompat;
+
/**
* A helper class for playing media on remote routes using the remote playback protocol
* defined by {@link MediaControlIntent}.
@@ -867,11 +868,11 @@
private final class ActionReceiver extends BroadcastReceiver {
public static final String ACTION_ITEM_STATUS_CHANGED =
- "android.support.v7.media.actions.ACTION_ITEM_STATUS_CHANGED";
+ "androidx.mediarouter.media.actions.ACTION_ITEM_STATUS_CHANGED";
public static final String ACTION_SESSION_STATUS_CHANGED =
- "android.support.v7.media.actions.ACTION_SESSION_STATUS_CHANGED";
+ "androidx.mediarouter.media.actions.ACTION_SESSION_STATUS_CHANGED";
public static final String ACTION_MESSAGE_RECEIVED =
- "android.support.v7.media.actions.ACTION_MESSAGE_RECEIVED";
+ "androidx.mediarouter.media.actions.ACTION_MESSAGE_RECEIVED";
ActionReceiver() {
}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
index a38491f..53901a4 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
@@ -24,7 +24,6 @@
import android.content.res.Resources;
import android.media.AudioManager;
import android.os.Build;
-import android.support.annotation.RequiresApi;
import android.view.Display;
import com.android.media.update.ApiHelper;
diff --git a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
index 3aff150..ad85af4 100644
--- a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
@@ -20,15 +20,13 @@
import android.content.res.Resources;
import android.graphics.Point;
import android.media.MediaMetadata;
+import android.media.SessionToken2;
import android.media.session.MediaController;
import android.media.session.PlaybackState;
-import android.media.SessionToken2;
import android.media.update.MediaControlView2Provider;
import android.media.update.ViewGroupProvider;
import android.os.Bundle;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
-import android.util.Log;
import android.view.Gravity;
import android.view.MotionEvent;
import android.view.View;
@@ -36,27 +34,28 @@
import android.view.WindowManager;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
-import android.widget.Button;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.ListView;
import android.widget.MediaControlView2;
-import android.widget.ProgressBar;
import android.widget.PopupWindow;
+import android.widget.ProgressBar;
import android.widget.RelativeLayout;
import android.widget.SeekBar;
import android.widget.SeekBar.OnSeekBarChangeListener;
import android.widget.TextView;
+import androidx.annotation.Nullable;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
import com.android.support.mediarouter.app.MediaRouteButton;
-import com.android.support.mediarouter.media.MediaRouter;
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
-import java.util.Arrays;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Formatter;
import java.util.List;
import java.util.Locale;
diff --git a/packages/MediaComponents/src/com/android/widget/SubtitleView.java b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
index 67b2cd1..db0ae33 100644
--- a/packages/MediaComponents/src/com/android/widget/SubtitleView.java
+++ b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
@@ -19,10 +19,11 @@
import android.content.Context;
import android.graphics.Canvas;
import android.os.Looper;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.widget.FrameLayout;
+import androidx.annotation.Nullable;
+
import com.android.media.subtitle.SubtitleController.Anchor;
import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
diff --git a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
index fc92e85..c9869c0 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
@@ -16,17 +16,18 @@
package com.android.widget;
+import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+
import android.content.Context;
import android.graphics.Rect;
import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
import android.util.AttributeSet;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
-import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+import androidx.annotation.NonNull;
class VideoSurfaceView extends SurfaceView implements VideoViewInterface, SurfaceHolder.Callback {
private static final String TAG = "VideoSurfaceView";
diff --git a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
index 024a3aa..40fb046 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
@@ -16,18 +16,19 @@
package com.android.widget;
+import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+
import android.content.Context;
import android.graphics.SurfaceTexture;
import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
-import android.support.annotation.RequiresApi;
import android.util.AttributeSet;
import android.util.Log;
import android.view.Surface;
import android.view.TextureView;
import android.view.View;
-import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+import androidx.annotation.NonNull;
+import androidx.annotation.RequiresApi;
@RequiresApi(26)
class VideoTextureView extends TextureView
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
index 97279d6..ffb145a 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -28,30 +28,29 @@
import android.media.AudioFocusRequest;
import android.media.AudioManager;
import android.media.DataSourceDesc;
+import android.media.MediaItem2;
import android.media.MediaMetadata;
+import android.media.MediaMetadata2;
+import android.media.MediaMetadataRetriever;
import android.media.MediaPlayer2;
import android.media.MediaPlayer2.MediaPlayer2EventCallback;
import android.media.MediaPlayer2.OnSubtitleDataListener;
import android.media.MediaPlayer2Impl;
-import android.media.SubtitleData;
-import android.media.MediaItem2;
-import android.media.MediaMetadata2;
-import android.media.MediaMetadataRetriever;
import android.media.Metadata;
import android.media.PlaybackParams;
+import android.media.SessionToken2;
+import android.media.SubtitleData;
import android.media.TimedText;
import android.media.session.MediaController;
import android.media.session.MediaController.PlaybackInfo;
import android.media.session.MediaSession;
import android.media.session.PlaybackState;
-import android.media.SessionToken2;
import android.media.update.VideoView2Provider;
import android.media.update.ViewGroupProvider;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.ResultReceiver;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.util.Log;
@@ -66,6 +65,8 @@
import android.widget.TextView;
import android.widget.VideoView2;
+import androidx.annotation.Nullable;
+
import com.android.internal.graphics.palette.Palette;
import com.android.media.RoutePlayer;
import com.android.media.subtitle.ClosedCaptionRenderer;
@@ -73,10 +74,10 @@
import com.android.media.subtitle.SubtitleTrack;
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
-import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaControlIntent;
-import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
import java.util.ArrayList;
import java.util.List;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 7419e64..2c26ba4 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -2,24 +2,6 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := \
- ServiceUtilities.cpp
-
-# FIXME Move this library to frameworks/native
-LOCAL_MODULE := libserviceutility
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- liblog \
- libbinder
-
-LOCAL_CFLAGS := -Wall -Werror
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-
LOCAL_SRC_FILES:= \
AudioFlinger.cpp \
Threads.cpp \
@@ -31,7 +13,8 @@
PatchPanel.cpp \
StateQueue.cpp \
BufLog.cpp \
- TypedLogger.cpp
+ TypedLogger.cpp \
+ NBAIO_Tee.cpp \
LOCAL_C_INCLUDES := \
frameworks/av/services/audiopolicy \
@@ -53,13 +36,14 @@
libnbaio \
libnblog \
libpowermanager \
- libserviceutility \
libmediautils \
libmemunreachable \
libmedia_helper
LOCAL_STATIC_LIBRARIES := \
libcpustats \
+ libjsoncpp \
+ libsndfile \
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -82,6 +66,7 @@
LOCAL_CFLAGS += -fvisibility=hidden
LOCAL_CFLAGS += -Werror -Wall
+LOCAL_SANITIZE := integer_overflow
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index bdd39c6..43566b7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -20,15 +20,16 @@
//#define LOG_NDEBUG 0
#include "Configuration.h"
+#include <algorithm> // std::any_of
#include <dirent.h>
#include <math.h>
#include <signal.h>
+#include <string>
#include <sys/time.h>
#include <sys/resource.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
-#include <cutils/multiuser.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
@@ -47,7 +48,7 @@
#include <system/audio.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
+#include "NBAIO_Tee.h"
#include <media/AudioResamplerPublic.h>
@@ -56,7 +57,8 @@
#include <system/audio_effects/effect_aec.h>
#include <audio_utils/primitives.h>
-#include <audio_utils/string.h>
+
+#include <json/json.h>
#include <powermanager/PowerManager.h>
@@ -66,6 +68,7 @@
#include <media/nbaio/PipeReader.h>
#include <media/AudioParameter.h>
#include <mediautils/BatteryNotifier.h>
+#include <mediautils/ServiceUtilities.h>
#include <private/android_filesystem_config.h>
//#define BUFLOG_NDEBUG 0
@@ -100,17 +103,6 @@
uint32_t AudioFlinger::mScreenState;
-
-#ifdef TEE_SINK
-bool AudioFlinger::mTeeSinkInputEnabled = false;
-bool AudioFlinger::mTeeSinkOutputEnabled = false;
-bool AudioFlinger::mTeeSinkTrackEnabled = false;
-
-size_t AudioFlinger::mTeeSinkInputFrames = kTeeSinkInputFramesDefault;
-size_t AudioFlinger::mTeeSinkOutputFrames = kTeeSinkOutputFramesDefault;
-size_t AudioFlinger::mTeeSinkTrackFrames = kTeeSinkTrackFramesDefault;
-#endif
-
// In order to avoid invalidating offloaded tracks each time a Visualizer is turned on and off
// we define a minimum time during which a global effect is considered enabled.
static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200);
@@ -160,6 +152,7 @@
mTotalMemory(0),
mClientSharedHeapSize(kMinimumClientSharedHeapSizeBytes),
mGlobalEffectEnableTime(0),
+ mPatchPanel(this),
mSystemReady(false)
{
// unsigned instead of audio_unique_id_use_t, because ++ operator is unavailable for enum
@@ -168,7 +161,6 @@
mNextUniqueIds[use] = AUDIO_UNIQUE_ID_USE_MAX;
}
- getpid_cached = getpid();
const bool doLog = property_get_bool("ro.test_harness", false);
if (doLog) {
mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
@@ -185,27 +177,6 @@
mEffectsFactoryHal = EffectsFactoryHalInterface::create();
mMediaLogNotifier->run("MediaLogNotifier");
-
-#ifdef TEE_SINK
- char value[PROPERTY_VALUE_MAX];
- (void) property_get("ro.debuggable", value, "0");
- int debuggable = atoi(value);
- int teeEnabled = 0;
- if (debuggable) {
- (void) property_get("af.tee", value, "0");
- teeEnabled = atoi(value);
- }
- // FIXME symbolic constants here
- if (teeEnabled & 1) {
- mTeeSinkInputEnabled = true;
- }
- if (teeEnabled & 2) {
- mTeeSinkOutputEnabled = true;
- }
- if (teeEnabled & 4) {
- mTeeSinkTrackEnabled = true;
- }
-#endif
}
void AudioFlinger::onFirstRef()
@@ -226,8 +197,6 @@
}
}
- mPatchPanel = new PatchPanel(this);
-
mMode = AUDIO_MODE_NORMAL;
gAudioFlinger = this;
@@ -341,7 +310,7 @@
*sessionId = actualSessionId;
} else {
if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
- AudioSystem::releaseOutput(io, streamType, actualSessionId);
+ AudioSystem::releaseOutput(portId);
} else {
AudioSystem::releaseInput(portId);
}
@@ -469,6 +438,18 @@
if (!dumpAllowed()) {
dumpPermissionDenial(fd, args);
} else {
+ // XXX This is sort of hacky for now.
+ const bool formatJson = std::any_of(args.begin(), args.end(),
+ [](const String16 &arg) { return arg == String16("--json"); });
+ if (formatJson) {
+ Json::Value root = getJsonDump();
+ Json::FastWriter writer;
+ std::string rootStr = writer.write(root);
+ // XXX consider buffering if the string happens to be too long.
+ dprintf(fd, "%s", rootStr.c_str());
+ return NO_ERROR;
+ }
+
// get state of hardware lock
bool hardwareLocked = dumpTryLock(mHardwareLock);
if (!hardwareLocked) {
@@ -478,7 +459,7 @@
mHardwareLock.unlock();
}
- bool locked = dumpTryLock(mLock);
+ const bool locked = dumpTryLock(mLock);
// failed to lock - AudioFlinger is probably deadlocked
if (!locked) {
@@ -534,12 +515,7 @@
dev->dump(fd);
}
-#ifdef TEE_SINK
- // dump the serially shared record tee sink
- if (mRecordTeeSource != 0) {
- dumpTee(fd, mRecordTeeSource, AUDIO_IO_HANDLE_NONE, 'C');
- }
-#endif
+ mPatchPanel.dump(fd);
BUFLOG_RESET;
@@ -547,6 +523,10 @@
mLock.unlock();
}
+#ifdef TEE_SINK
+ // NBAIO_Tee dump is safe to call outside of AF lock.
+ NBAIO_Tee::dumpAll(fd, "_DUMP");
+#endif
// append a copy of media.log here by forwarding fd to it, but don't attempt
// to lookup the service if it's not running, as it will block for a second
if (sMediaLogServiceAsBinder != 0) {
@@ -581,6 +561,32 @@
return NO_ERROR;
}
+Json::Value AudioFlinger::getJsonDump()
+{
+ Json::Value root(Json::objectValue);
+ const bool locked = dumpTryLock(mLock);
+
+ // failed to lock - AudioFlinger is probably deadlocked
+ if (!locked) {
+ root["deadlock_message"] = kDeadlockedString;
+ }
+ // FIXME risky to access data structures without a lock held?
+
+ Json::Value playbackThreads = Json::arrayValue;
+ // dump playback threads
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ playbackThreads.append(mPlaybackThreads.valueAt(i)->getJsonDump());
+ }
+
+ if (locked) {
+ mLock.unlock();
+ }
+
+ root["playback_threads"] = playbackThreads;
+
+ return root;
+}
+
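The getJsonDump() added above backs the new "--json" path in dump(). A minimal sketch of consuming that output with the same jsoncpp library; the piping via dumpsys and the parser tool itself are assumptions here, only the deadlock_message and playback_threads keys come from this patch:

    #include <iostream>
    #include <json/json.h>

    // e.g. adb shell dumpsys media.audio_flinger --json | af_dump_parse
    int main() {
        Json::Value root;
        Json::Reader reader;
        if (!reader.parse(std::cin, root)) {
            std::cerr << reader.getFormattedErrorMessages();
            return 1;
        }
        if (root.isMember("deadlock_message")) {
            std::cout << "deadlock: " << root["deadlock_message"].asString() << '\n';
        }
        for (const Json::Value& thread : root["playback_threads"]) {
            std::cout << thread.toStyledString() << '\n';  // one object per playback thread
        }
        return 0;
    }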
sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid)
{
Mutex::Autolock _cl(mClientLock);
@@ -666,7 +672,7 @@
bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid = input.clientInfo.clientUid;
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
@@ -813,7 +819,7 @@
Exit:
if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+ AudioSystem::releaseOutput(portId);
}
*status = lStatus;
return trackHandle;
@@ -1078,9 +1084,9 @@
ALOGW("checkStreamType() invalid stream %d", stream);
return BAD_VALUE;
}
- pid_t caller = IPCThreadState::self()->getCallingPid();
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && caller != getpid_cached) {
- ALOGW("checkStreamType() pid %d cannot use internal stream type %d", caller, stream);
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && !isAudioServerUid(callerUid)) {
+ ALOGW("checkStreamType() uid %d cannot use internal stream type %d", callerUid, stream);
return PERMISSION_DENIED;
}
@@ -1180,6 +1186,23 @@
}
}
+// forwardAudioHwSyncToDownstreamPatches_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::forwardParametersToDownstreamPatches_l(
+ audio_io_handle_t upStream, const String8& keyValuePairs,
+ std::function<bool(const sp<PlaybackThread>&)> useThread)
+{
+ std::vector<PatchPanel::SoftwarePatch> swPatches;
+ if (mPatchPanel.getDownstreamSoftwarePatches(upStream, &swPatches) != OK) return;
+ ALOGV_IF(!swPatches.empty(), "%s found %zu downstream patches for stream ID %d",
+ __func__, swPatches.size(), upStream);
+ for (const auto& swPatch : swPatches) {
+ sp<PlaybackThread> downStream = checkPlaybackThread_l(swPatch.getPlaybackThreadHandle());
+ if (downStream != NULL && (useThread == nullptr || useThread(downStream))) {
+ downStream->setParameters(keyValuePairs);
+ }
+ }
+}
+
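forwardParametersToDownstreamPatches_l() takes an optional predicate so a caller can either broadcast to every downstream playback thread or restrict delivery, as the HW A/V sync changes later in this patch do. A small standalone illustration of that optional std::function pattern, with made-up thread and parameter names:

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    struct DownstreamThread { std::string name; bool usesHwAvSync; };

    static void forwardToDownstream(std::vector<DownstreamThread>& threads,
                                    const std::string& keyValuePairs,
                                    std::function<bool(const DownstreamThread&)> useThread = nullptr) {
        for (auto& t : threads) {
            if (useThread == nullptr || useThread(t)) {  // null predicate means "all threads"
                std::printf("%s <- %s\n", t.name.c_str(), keyValuePairs.c_str());
            }
        }
    }

    int main() {
        std::vector<DownstreamThread> downstream{{"deep_buffer", false}, {"direct_av_sync", true}};
        forwardToDownstream(downstream, "routing=2");            // delivered to both
        forwardToDownstream(downstream, "hw_av_sync=7",
                [](const DownstreamThread& t) { return t.usesHwAvSync; });  // filtered
        return 0;
    }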
// Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
// Some keys are used for audio routing and audio path configuration and should be reserved for use
// by audio policy and audio flinger for functional, privacy and security reasons.
@@ -1200,9 +1223,8 @@
String8(AudioParameter::keyStreamSupportedSamplingRates),
};
- // multiuser friendly app ID check for requests coming from audioserver
- if (multiuser_get_app_id(callingUid) == AID_AUDIOSERVER) {
- return;
+ if (isAudioServerUid(callingUid)) {
+ return; // no need to filter if audioserver.
}
AudioParameter param = AudioParameter(keyValuePairs);
@@ -1296,7 +1318,9 @@
}
}
if (thread != 0) {
- return thread->setParameters(filteredKeyValuePairs);
+ status_t result = thread->setParameters(filteredKeyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), filteredKeyValuePairs);
+ return result;
}
return BAD_VALUE;
}
@@ -1636,7 +1660,7 @@
bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid = input.clientInfo.clientUid;
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
@@ -1854,6 +1878,10 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_MSD) == 0) {
+ // An MSD module is inserted before hardware modules in order to mix encoded streams.
+ flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
+ }
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
@@ -1885,7 +1913,7 @@
status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory)
{
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (uid != AID_SYSTEM) {
+ if (!isAudioServerOrSystemServerUid(uid)) {
return PERMISSION_DENIED;
}
Mutex::Autolock _l(mLock);
@@ -1930,6 +1958,28 @@
return mClientSharedHeapSize;
}
+status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
+{
+ ALOGV(__func__);
+
+ audio_module_handle_t module;
+ if (config->type == AUDIO_PORT_TYPE_DEVICE) {
+ module = config->ext.device.hw_module;
+ } else {
+ module = config->ext.mix.hw_module;
+ }
+
+ Mutex::Autolock _l(mLock);
+ ssize_t index = mAudioHwDevs.indexOfKey(module);
+ if (index < 0) {
+ ALOGW("%s() bad hw module %d", __func__, module);
+ return BAD_VALUE;
+ }
+
+ AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(index);
+ return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+}
+
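The new setAudioPortConfig() only looks up the HAL module that owns the port (ext.device.hw_module or ext.mix.hw_module) and forwards the request. A sketch of what a caller-side device-port gain config could look like; the module handle, device type and gain value are placeholders, not taken from this patch:

    #include <cstring>
    #include <system/audio.h>

    audio_port_config makeSpeakerGainConfig(audio_module_handle_t module,
                                             audio_port_handle_t portId) {
        audio_port_config config;
        std::memset(&config, 0, sizeof(config));
        config.id = portId;
        config.role = AUDIO_PORT_ROLE_SINK;
        config.type = AUDIO_PORT_TYPE_DEVICE;      // selects the ext.device branch above
        config.config_mask = AUDIO_PORT_CONFIG_GAIN;
        config.ext.device.hw_module = module;      // what AudioFlinger uses to pick the HAL
        config.ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;
        config.gain.index = 0;
        config.gain.mode = AUDIO_GAIN_MODE_JOINT;
        config.gain.values[0] = -3200;             // e.g. -32 dB, expressed in millibels
        return config;
    }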
audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
@@ -1975,7 +2025,10 @@
if (sessions & ThreadBase::TRACK_SESSION) {
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
- thread->setParameters(param.toString());
+ String8 keyValuePairs = param.toString();
+ thread->setParameters(keyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+ [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
break;
}
}
@@ -2021,7 +2074,10 @@
ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
- thread->setParameters(param.toString());
+ String8 keyValuePairs = param.toString();
+ thread->setParameters(keyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+ [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
}
}
@@ -2111,6 +2167,7 @@
*output, thread.get());
}
mPlaybackThreads.add(*output, thread);
+ mPatchPanel.notifyStreamOpened(outHwDev, *output);
return thread;
}
}
@@ -2246,6 +2303,7 @@
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = output;
ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+ mPatchPanel.notifyStreamClosed(output);
}
// The thread entity (active unit of execution) is no longer running here,
// but the ThreadBase container still exists.
@@ -2274,7 +2332,7 @@
delete out;
}
-void AudioFlinger::closeOutputInternal_l(const sp<PlaybackThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<PlaybackThread>& thread)
{
mPlaybackThreads.removeItem(thread->mId);
thread->exit();
@@ -2350,6 +2408,9 @@
return 0;
}
+ // Some flags are specific to framework and must not leak to the HAL.
+ flags = static_cast<audio_input_flags_t>(flags & ~AUDIO_INPUT_FRAMEWORK_FLAGS);
+
// Audio Policy can request a specific handle for hardware hotword.
// The goal here is not to re-open an already opened input.
// It is to use a pre-assigned I/O handle.
@@ -2407,55 +2468,6 @@
thread.get());
return thread;
} else {
-#ifdef TEE_SINK
- // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
- // or (re-)create if current Pipe is idle and does not match the new format
- sp<NBAIO_Sink> teeSink;
- enum {
- TEE_SINK_NO, // don't copy input
- TEE_SINK_NEW, // copy input using a new pipe
- TEE_SINK_OLD, // copy input using an existing pipe
- } kind;
- NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
- audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
- if (!mTeeSinkInputEnabled) {
- kind = TEE_SINK_NO;
- } else if (!Format_isValid(format)) {
- kind = TEE_SINK_NO;
- } else if (mRecordTeeSink == 0) {
- kind = TEE_SINK_NEW;
- } else if (mRecordTeeSink->getStrongCount() != 1) {
- kind = TEE_SINK_NO;
- } else if (Format_isEqual(format, mRecordTeeSink->format())) {
- kind = TEE_SINK_OLD;
- } else {
- kind = TEE_SINK_NEW;
- }
- switch (kind) {
- case TEE_SINK_NEW: {
- Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {format};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mRecordTeeSink = pipe;
- mRecordTeeSource = pipeReader;
- teeSink = pipe;
- }
- break;
- case TEE_SINK_OLD:
- teeSink = mRecordTeeSink;
- break;
- case TEE_SINK_NO:
- default:
- break;
- }
-#endif
-
// Start record thread
// RecordThread requires both input and output device indication to forward to audio
// pre processing modules
@@ -2465,9 +2477,6 @@
primaryOutputDevice_l(),
devices,
mSystemReady
-#ifdef TEE_SINK
- , teeSink
-#endif
);
mRecordThreads.add(*input, thread);
ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
@@ -2567,7 +2576,7 @@
delete in;
}
-void AudioFlinger::closeInputInternal_l(const sp<RecordThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<RecordThread>& thread)
{
mRecordThreads.removeItem(thread->mId);
closeInputFinish(thread);
@@ -2605,7 +2614,8 @@
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid);
- if (pid != -1 && (caller == getpid_cached)) {
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (pid != -1 && isAudioServerUid(callerUid)) { // check must match releaseAudioSessionId()
caller = pid;
}
@@ -2639,7 +2649,8 @@
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
ALOGV("releasing %d from %d for %d", audioSession, caller, pid);
- if (pid != -1 && (caller == getpid_cached)) {
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (pid != -1 && isAudioServerUid(callerUid)) { // check must match acquireAudioSessionId()
caller = pid;
}
size_t num = mAudioSessionRefs.size();
@@ -2656,9 +2667,10 @@
return;
}
}
- // If the caller is mediaserver it is likely that the session being released was acquired
+ // If the caller is audioserver it is likely that the session being released was acquired
// on behalf of a process not in notification clients and we ignore the warning.
- ALOGW_IF(caller != getpid_cached, "session id %d not found for pid %d", audioSession, caller);
+ ALOGW_IF(!isAudioServerUid(callerUid),
+ "session id %d not found for pid %d", audioSession, caller);
}
bool AudioFlinger::isSessionAcquired_l(audio_session_t audioSession)
@@ -2938,16 +2950,74 @@
}
status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
- effect_descriptor_t *descriptor) const
+ const effect_uuid_t *pTypeUuid,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t *descriptor) const
{
+ if (pUuid == NULL || pTypeUuid == NULL || descriptor == NULL) {
+ return BAD_VALUE;
+ }
+
Mutex::Autolock _l(mLock);
- if (mEffectsFactoryHal.get()) {
- return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
- } else {
+
+ if (!mEffectsFactoryHal.get()) {
return -ENODEV;
}
-}
+ status_t status = NO_ERROR;
+ if (!EffectsFactoryHalInterface::isNullUuid(pUuid)) {
+ // If uuid is specified, request effect descriptor from that.
+ status = mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+ } else if (!EffectsFactoryHalInterface::isNullUuid(pTypeUuid)) {
+ // If uuid is not specified, look for an available implementation
+ // of the required type instead.
+
+ // Use a temporary descriptor to avoid modifying |descriptor| in the failure case.
+ effect_descriptor_t desc;
+ desc.flags = 0; // prevent compiler warning
+
+ uint32_t numEffects = 0;
+ status = mEffectsFactoryHal->queryNumberEffects(&numEffects);
+ if (status < 0) {
+ ALOGW("getEffectDescriptor() error %d from FactoryHal queryNumberEffects", status);
+ return status;
+ }
+
+ bool found = false;
+ for (uint32_t i = 0; i < numEffects; i++) {
+ status = mEffectsFactoryHal->getDescriptor(i, &desc);
+ if (status < 0) {
+ ALOGW("getEffectDescriptor() error %d from FactoryHal getDescriptor", status);
+ continue;
+ }
+ if (memcmp(&desc.type, pTypeUuid, sizeof(effect_uuid_t)) == 0) {
+ // If matching type found save effect descriptor.
+ found = true;
+ *descriptor = desc;
+
+ // If there's no preferred flag or this descriptor matches the preferred
+ // flag, success! If this descriptor doesn't match the preferred
+ // flag, continue enumeration in case a better matching version of this
+ // effect type is available. Note that this means if no effect with a
+ // correct flag is found, the descriptor returned will correspond to the
+ // last effect that at least had a matching type uuid (if any).
+ if (preferredTypeFlag == EFFECT_FLAG_TYPE_MASK ||
+ (desc.flags & EFFECT_FLAG_TYPE_MASK) == preferredTypeFlag) {
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ status = NAME_NOT_FOUND;
+ ALOGW("getEffectDescriptor(): Effect not found by type.");
+ }
+ } else {
+ status = BAD_VALUE;
+ ALOGE("getEffectDescriptor(): Either uuid or type uuid must be non-null UUIDs.");
+ }
+ return status;
+}
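With the added type uuid and preferred-flag arguments, a caller that only knows the effect type can resolve a concrete implementation, which is how createEffect() uses it below. A hedged sketch of such a lookup from the client side; it assumes the matching IAudioFlinger interface change, and kEnvReverbTypeUuid plus the af handle are placeholders:

    // Inside namespace android; EFFECT_UUID_NULL and the flag constants come from
    // the audio effect headers pulled in by IAudioFlinger.h.
    extern const effect_uuid_t kEnvReverbTypeUuid;  // placeholder for a real type uuid

    status_t lookupAuxEffectByType(const sp<IAudioFlinger>& af, effect_descriptor_t* desc) {
        return af->getEffectDescriptor(
                EFFECT_UUID_NULL,             // no specific implementation requested
                &kEnvReverbTypeUuid,          // resolve by type instead
                EFFECT_FLAG_TYPE_AUXILIARY,   // prefer an auxiliary variant if one exists
                desc);
    }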
sp<IEffect> AudioFlinger::createEffect(
effect_descriptor_t *pDesc,
@@ -2966,7 +3036,7 @@
effect_descriptor_t desc;
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+ if (pid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
ALOGW_IF(pid != -1 && pid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
@@ -2989,8 +3059,8 @@
}
// Session AUDIO_SESSION_OUTPUT_STAGE is reserved for output stage effects
- // that can only be created by audio policy manager (running in same process)
- if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid_cached != pid) {
+ // that can only be created by audio policy manager
+ if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && !isAudioServerUid(callingUid)) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -3001,60 +3071,15 @@
}
{
- if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
- // if uuid is specified, request effect descriptor
- lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
- if (lStatus < 0) {
- ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
- goto Exit;
- }
- } else {
- // if uuid is not specified, look for an available implementation
- // of the required type in effect factory
- if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
- ALOGW("createEffect() no effect type");
- lStatus = BAD_VALUE;
- goto Exit;
- }
- uint32_t numEffects = 0;
- effect_descriptor_t d;
- d.flags = 0; // prevent compiler warning
- bool found = false;
-
- lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
- if (lStatus < 0) {
- ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
- goto Exit;
- }
- for (uint32_t i = 0; i < numEffects; i++) {
- lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
- if (lStatus < 0) {
- ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
- continue;
- }
- if (memcmp(&desc.type, &pDesc->type, sizeof(effect_uuid_t)) == 0) {
- // If matching type found save effect descriptor. If the session is
- // 0 and the effect is not auxiliary, continue enumeration in case
- // an auxiliary version of this effect type is available
- found = true;
- d = desc;
- if (sessionId != AUDIO_SESSION_OUTPUT_MIX ||
- (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- break;
- }
- }
- }
- if (!found) {
- lStatus = BAD_VALUE;
- ALOGW("createEffect() effect not found");
- goto Exit;
- }
- // For same effect type, chose auxiliary version over insert version if
- // connect to output mix (Compliance to OpenSL ES)
- if (sessionId == AUDIO_SESSION_OUTPUT_MIX &&
- (d.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
- desc = d;
- }
+ // Get the full effect descriptor from the uuid/type.
+ // If the session is the output mix, prefer an auxiliary effect,
+ // otherwise no preference.
+ uint32_t preferredType = (sessionId == AUDIO_SESSION_OUTPUT_MIX ?
+ EFFECT_FLAG_TYPE_AUXILIARY : EFFECT_FLAG_TYPE_MASK);
+ lStatus = getEffectDescriptor(&pDesc->uuid, &pDesc->type, preferredType, &desc);
+ if (lStatus < 0) {
+ ALOGW("createEffect() error %d from getEffectDescriptor", lStatus);
+ goto Exit;
}
// Do not allow auxiliary effects on a session different from 0 (output mix)
@@ -3370,136 +3395,6 @@
}
-struct Entry {
-#define TEE_MAX_FILENAME 32 // %Y%m%d%H%M%S_%d.wav = 4+2+2+2+2+2+1+1+4+1 = 21
- char mFileName[TEE_MAX_FILENAME];
-};
-
-int comparEntry(const void *p1, const void *p2)
-{
- return strcmp(((const Entry *) p1)->mFileName, ((const Entry *) p2)->mFileName);
-}
-
-#ifdef TEE_SINK
-void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix)
-{
- NBAIO_Source *teeSource = source.get();
- if (teeSource != NULL) {
- // .wav rotation
- // There is a benign race condition if 2 threads call this simultaneously.
- // They would both traverse the directory, but the result would simply be
- // failures at unlink() which are ignored. It's also unlikely since
- // normally dumpsys is only done by bugreport or from the command line.
- char teePath[PATH_MAX] = "/data/misc/audioserver";
- size_t teePathLen = strlen(teePath);
- DIR *dir = opendir(teePath);
- teePath[teePathLen++] = '/';
- if (dir != NULL) {
-#define TEE_MAX_SORT 20 // number of entries to sort
-#define TEE_MAX_KEEP 10 // number of entries to keep
- struct Entry entries[TEE_MAX_SORT];
- size_t entryCount = 0;
- while (entryCount < TEE_MAX_SORT) {
- errno = 0; // clear errno before readdir() to track potential errors.
- const struct dirent *result = readdir(dir);
- if (result == nullptr) {
- ALOGW_IF(errno != 0, "tee readdir() failure %s", strerror(errno));
- break;
- }
- // ignore non .wav file entries
- const size_t nameLen = strlen(result->d_name);
- if (nameLen <= 4 || nameLen >= TEE_MAX_FILENAME ||
- strcmp(&result->d_name[nameLen - 4], ".wav")) {
- continue;
- }
- (void)audio_utils_strlcpy(entries[entryCount++].mFileName, result->d_name);
- }
- (void) closedir(dir);
- if (entryCount > TEE_MAX_KEEP) {
- qsort(entries, entryCount, sizeof(Entry), comparEntry);
- for (size_t i = 0; i < entryCount - TEE_MAX_KEEP; ++i) {
- strcpy(&teePath[teePathLen], entries[i].mFileName);
- (void) unlink(teePath);
- }
- }
- } else {
- if (fd >= 0) {
- dprintf(fd, "unable to rotate tees in %.*s: %s\n", (int) teePathLen, teePath,
- strerror(errno));
- }
- }
- char teeTime[16];
- struct timeval tv;
- gettimeofday(&tv, NULL);
- struct tm tm;
- localtime_r(&tv.tv_sec, &tm);
- strftime(teeTime, sizeof(teeTime), "%Y%m%d%H%M%S", &tm);
- snprintf(&teePath[teePathLen], sizeof(teePath) - teePathLen, "%s_%d_%c.wav", teeTime, id,
- suffix);
- // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd
- int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
- if (teeFd >= 0) {
- // FIXME use libsndfile
- char wavHeader[44];
- memcpy(wavHeader,
- "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
- sizeof(wavHeader));
- NBAIO_Format format = teeSource->format();
- unsigned channelCount = Format_channelCount(format);
- uint32_t sampleRate = Format_sampleRate(format);
- size_t frameSize = Format_frameSize(format);
- wavHeader[22] = channelCount; // number of channels
- wavHeader[24] = sampleRate; // sample rate
- wavHeader[25] = sampleRate >> 8;
- wavHeader[32] = frameSize; // block alignment
- wavHeader[33] = frameSize >> 8;
- write(teeFd, wavHeader, sizeof(wavHeader));
- size_t total = 0;
- bool firstRead = true;
-#define TEE_SINK_READ 1024 // frames per I/O operation
- void *buffer = malloc(TEE_SINK_READ * frameSize);
- for (;;) {
- size_t count = TEE_SINK_READ;
- ssize_t actual = teeSource->read(buffer, count);
- bool wasFirstRead = firstRead;
- firstRead = false;
- if (actual <= 0) {
- if (actual == (ssize_t) OVERRUN && wasFirstRead) {
- continue;
- }
- break;
- }
- ALOG_ASSERT(actual <= (ssize_t)count);
- write(teeFd, buffer, actual * frameSize);
- total += actual;
- }
- free(buffer);
- lseek(teeFd, (off_t) 4, SEEK_SET);
- uint32_t temp = 44 + total * frameSize - 8;
- // FIXME not big-endian safe
- write(teeFd, &temp, sizeof(temp));
- lseek(teeFd, (off_t) 40, SEEK_SET);
- temp = total * frameSize;
- // FIXME not big-endian safe
- write(teeFd, &temp, sizeof(temp));
- close(teeFd);
- // TODO Should create file with temporary name and then rename to final if non-empty.
- if (total > 0) {
- if (fd >= 0) {
- dprintf(fd, "tee copied to %s\n", teePath);
- }
- } else {
- unlink(teePath);
- }
- } else {
- if (fd >= 0) {
- dprintf(fd, "unable to create tee %s: %s\n", teePath, strerror(errno));
- }
- }
- }
-}
-#endif
-
// ----------------------------------------------------------------------------
status_t AudioFlinger::onTransact(
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 963a87d..e9e6e94 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -23,6 +23,8 @@
#include <mutex>
#include <deque>
#include <map>
+#include <memory>
+#include <string>
#include <vector>
#include <stdint.h>
#include <sys/types.h>
@@ -62,7 +64,9 @@
#include <media/LinearMap.h>
#include <media/VolumeShaper.h>
+#include <audio_utils/clock.h>
#include <audio_utils/SimpleLog.h>
+#include <audio_utils/TimestampVerifier.h>
#include "FastCapture.h"
#include "FastMixer.h"
@@ -71,9 +75,11 @@
#include "AudioStreamOut.h"
#include "SpdifStreamOut.h"
#include "AudioHwDevice.h"
+#include "NBAIO_Tee.h"
#include <powermanager/IPowerManager.h>
+#include <json/json.h>
#include <media/nblog/NBLog.h>
#include <private/media/AudioEffectShared.h>
#include <private/media/AudioTrackShared.h>
@@ -109,6 +115,7 @@
static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
virtual status_t dump(int fd, const Vector<String16>& args);
+ Json::Value getJsonDump();
// IAudioFlinger interface, in binder opcode order
virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
@@ -202,6 +209,8 @@
virtual status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor) const;
virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
+ const effect_uuid_t *pTypeUuid,
+ uint32_t preferredTypeFlag,
effect_descriptor_t *descriptor) const;
virtual sp<IEffect> createEffect(
@@ -675,6 +684,9 @@
bool updateOrphanEffectChains(const sp<EffectModule>& effect);
void broacastParametersToRecordThreads_l(const String8& keyValuePairs);
+ void forwardParametersToDownstreamPatches_l(
+ audio_io_handle_t upStream, const String8& keyValuePairs,
+ std::function<bool(const sp<PlaybackThread>&)> useThread = nullptr);
// AudioStreamIn is immutable, so their fields are const.
// For emphasis, we could also make all pointers to them be "const *",
@@ -791,44 +803,16 @@
// for use from destructor
status_t closeOutput_nonvirtual(audio_io_handle_t output);
- void closeOutputInternal_l(const sp<PlaybackThread>& thread);
+ void closeThreadInternal_l(const sp<PlaybackThread>& thread);
status_t closeInput_nonvirtual(audio_io_handle_t input);
- void closeInputInternal_l(const sp<RecordThread>& thread);
+ void closeThreadInternal_l(const sp<RecordThread>& thread);
void setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId);
status_t checkStreamType(audio_stream_type_t stream) const;
void filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
-#ifdef TEE_SINK
- // all record threads serially share a common tee sink, which is re-created on format change
- sp<NBAIO_Sink> mRecordTeeSink;
- sp<NBAIO_Source> mRecordTeeSource;
-#endif
-
public:
-
-#ifdef TEE_SINK
- // tee sink, if enabled by property, allows dumpsys to write most recent audio to .wav file
- static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix);
-
- // whether tee sink is enabled by property
- static bool mTeeSinkInputEnabled;
- static bool mTeeSinkOutputEnabled;
- static bool mTeeSinkTrackEnabled;
-
- // runtime configured size of each tee sink pipe, in frames
- static size_t mTeeSinkInputFrames;
- static size_t mTeeSinkOutputFrames;
- static size_t mTeeSinkTrackFrames;
-
- // compile-time default size of tee sink pipes, in frames
- // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
- static const size_t kTeeSinkInputFramesDefault = 0x200000;
- static const size_t kTeeSinkOutputFramesDefault = 0x200000;
- static const size_t kTeeSinkTrackFramesDefault = 0x200000;
-#endif
-
// These methods read variables atomically without mLock,
// though the variables are updated with mLock.
bool isLowRamDevice() const { return mIsLowRamDevice; }
@@ -843,7 +827,8 @@
nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled
- sp<PatchPanel> mPatchPanel;
+ // protected by mLock
+ PatchPanel mPatchPanel;
sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
bool mSystemReady;
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index eb826c6..d4299b0 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -35,6 +35,9 @@
enum Flags {
AHWD_CAN_SET_MASTER_VOLUME = 0x1,
AHWD_CAN_SET_MASTER_MUTE = 0x2,
+ // Means that this isn't a terminal module, and software patches
+ // are used to transport audio data further.
+ AHWD_IS_INSERT = 0x4,
};
AudioHwDevice(audio_module_handle_t handle,
@@ -55,6 +58,10 @@
return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
}
+ bool isInsert() const {
+ return (0 != (mFlags & AHWD_IS_INSERT));
+ }
+
audio_module_handle_t handle() const { return mHandle; }
const char *moduleName() const { return mModuleName; }
sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index ede8e3f..34cd821 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -27,7 +27,7 @@
//#define AUDIO_WATCHDOG
// uncomment to display CPU load adjusted for CPU frequency
-//#define CPU_FREQUENCY_STATISTICS
+//#define CPU_FREQUENCY_STATISTICS
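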
// uncomment to enable fast threads to take performance samples for later statistical analysis
#define FAST_THREAD_STATISTICS
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 024e7eb..ded2146 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -31,9 +31,9 @@
#include <media/AudioEffect.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <mediautils/ServiceUtilities.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
// ----------------------------------------------------------------------------
@@ -1849,7 +1849,7 @@
bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
snprintf(buffer, size, "\t\t\t%5d %5d %3s %3s %5u %5u\n",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mPriority,
mHasControl ? "yes" : "no",
locked ? "yes" : "no",
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index d063772..dd84bf2 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -20,6 +20,7 @@
#define ATRACE_TAG ATRACE_TAG_AUDIO
#include "Configuration.h"
+#include <audio_utils/format.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <media/AudioBufferProvider.h>
@@ -161,7 +162,21 @@
const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
const FastCaptureState::Command command = mCommand;
- const size_t frameCount = current->mFrameCount;
+ size_t frameCount = current->mFrameCount;
+ AudioBufferProvider* fastPatchRecordBufferProvider = current->mFastPatchRecordBufferProvider;
+ AudioBufferProvider::Buffer patchBuffer;
+
+ if (fastPatchRecordBufferProvider != 0) {
+ patchBuffer.frameCount = ~0;
+ status_t status = fastPatchRecordBufferProvider->getNextBuffer(&patchBuffer);
+ if (status != NO_ERROR) {
+ frameCount = 0;
+ } else if (patchBuffer.frameCount < frameCount) {
+ // TODO: Make sure that it doesn't cause any issues if we just get a small available
+ // buffer from the buffer provider.
+ frameCount = patchBuffer.frameCount;
+ }
+ }
if ((command & FastCaptureState::READ) /*&& isWarm*/) {
ALOG_ASSERT(mInputSource != NULL);
@@ -176,6 +191,7 @@
mTotalNativeFramesRead += framesRead;
dumpState->mFramesRead = mTotalNativeFramesRead;
mReadBufferState = framesRead;
+ patchBuffer.frameCount = framesRead;
} else {
dumpState->mReadErrors++;
mReadBufferState = 0;
@@ -193,11 +209,18 @@
}
if (mReadBufferState > 0) {
ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
- // FIXME This supports at most one fast capture client.
- // To handle multiple clients this could be converted to an array,
- // or with a lot more work the control block could be shared by all clients.
audio_track_cblk_t* cblk = current->mCblk;
- if (cblk != NULL && framesWritten > 0) {
+ if (fastPatchRecordBufferProvider != 0) {
+ // This indicates the fast track is a patch record, update the cblk by
+ // calling releaseBuffer().
+ memcpy_by_audio_format(patchBuffer.raw, current->mFastPatchRecordFormat,
+ mReadBuffer, mFormat.mFormat, framesWritten * mFormat.mChannelCount);
+ patchBuffer.frameCount = framesWritten;
+ fastPatchRecordBufferProvider->releaseBuffer(&patchBuffer);
+ } else if (cblk != NULL && framesWritten > 0) {
+ // FIXME This supports at most one fast capture client.
+ // To handle multiple clients this could be converted to an array,
+ // or with a lot more work the control block could be shared by all clients.
int32_t rear = cblk->u.mStreaming.mRear;
android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear);
cblk->mServer += framesWritten;
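The fast-capture path above now drives a patch record through the generic AudioBufferProvider cycle: size the request, getNextBuffer(), convert with memcpy_by_audio_format(), then releaseBuffer() to advance the control block. A compact sketch of that cycle in isolation; the provider, formats and counts are placeholders:

    #include <audio_utils/format.h>
    #include <media/AudioBufferProvider.h>
    #include <utils/Errors.h>

    using namespace android;

    ssize_t copyOneChunk(AudioBufferProvider* provider,
                         const void* src, audio_format_t srcFormat,
                         audio_format_t dstFormat, size_t channelCount, size_t frames) {
        AudioBufferProvider::Buffer buf;
        buf.frameCount = frames;                 // ask for at most this many frames
        if (provider->getNextBuffer(&buf) != NO_ERROR) {
            return 0;                            // nothing available this cycle
        }
        const size_t toCopy = buf.frameCount;    // the provider may hand back fewer frames
        memcpy_by_audio_format(buf.raw, dstFormat, src, srcFormat, toCopy * channelCount);
        buf.frameCount = toCopy;
        provider->releaseBuffer(&buf);           // publishes the frames to the consumer side
        return (ssize_t)toCopy;
    }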
diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h
index 9bca2d4..d287232 100644
--- a/services/audioflinger/FastCaptureState.h
+++ b/services/audioflinger/FastCaptureState.h
@@ -18,6 +18,7 @@
#define ANDROID_AUDIO_FAST_CAPTURE_STATE_H
#include <media/nbaio/NBAIO.h>
+#include <media/AudioBufferProvider.h>
#include "FastThreadState.h"
#include <private/media/AudioTrackShared.h>
@@ -37,6 +38,10 @@
size_t mFrameCount; // number of frames per fast capture buffer
audio_track_cblk_t* mCblk; // control block for the single fast client, or NULL
+ audio_format_t mFastPatchRecordFormat = AUDIO_FORMAT_INVALID;
+ AudioBufferProvider* mFastPatchRecordBufferProvider = nullptr; // a reference to a patch
+ // record in fast mode
+
// Extends FastThreadState::Command
static const Command
// The following commands also process configuration changes, and can be "or"ed:
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 79bb9fe..a42d6b3 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -32,7 +32,7 @@
#include <utils/Trace.h>
#include <system/audio.h>
#ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
@@ -47,7 +47,8 @@
/*static*/ const FastMixerState FastMixer::sInitial;
-FastMixer::FastMixer() : FastThread("cycle_ms", "load_us"),
+FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
+ : FastThread("cycle_ms", "load_us"),
// mFastTrackNames
// mGenerations
mOutputSink(NULL),
@@ -66,8 +67,11 @@
mTotalNativeFramesWritten(0),
// timestamp
mNativeFramesWrittenButNotPresented(0), // the = 0 is to silence the compiler
- mMasterMono(false)
+ mMasterMono(false),
+ mThreadIoHandle(parentIoHandle)
{
+ (void)mThreadIoHandle; // prevent unused warning, see C++17 [[maybe_unused]]
+
// FIXME pass sInitial as parameter to base class constructor, and make it static local
mPrevious = &sInitial;
mCurrent = &sInitial;
@@ -220,6 +224,10 @@
previousTrackMask = 0;
mFastTracksGen = current->mFastTracksGen - 1;
dumpState->mFrameCount = frameCount;
+#ifdef TEE_SINK
+ mTee.set(mFormat, NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle) + "_F");
+#endif
} else {
previousTrackMask = previous->mTrackMask;
}
@@ -328,13 +336,15 @@
{
// TODO: pass an ID parameter to indicate which time series we want to write to in NBLog.cpp
// Or: pass both of these into a single call with a boolean
+ const FastMixerState * const current = (const FastMixerState *) mCurrent;
+ FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
+
if (mIsWarm) {
LOG_HIST_TS();
} else {
+ dumpState->mTimestampVerifier.discontinuity();
LOG_AUDIO_STATE();
}
- const FastMixerState * const current = (const FastMixerState *) mCurrent;
- FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
const FastMixerState::Command command = mCommand;
const size_t frameCount = current->mFrameCount;
@@ -446,10 +456,9 @@
frameCount * Format_channelCount(mFormat));
}
// if non-NULL, then duplicate write() to this non-blocking sink
- NBAIO_Sink* teeSink;
- if ((teeSink = current->mTeeSink) != NULL) {
- (void) teeSink->write(buffer, frameCount);
- }
+#ifdef TEE_SINK
+ mTee.write(buffer, frameCount);
+#endif
// FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
// but this code should be modified to handle both non-blocking and blocking sinks
dumpState->mWriteSequence++;
@@ -470,35 +479,47 @@
mAttemptedWrite = true;
// FIXME count # of writes blocked excessively, CPU usage, etc. for dump
- ExtendedTimestamp timestamp; // local
- status_t status = mOutputSink->getTimestamp(timestamp);
- if (status == NO_ERROR) {
- const int64_t totalNativeFramesPresented =
- timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
- if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
- mNativeFramesWrittenButNotPresented =
- mTotalNativeFramesWritten - totalNativeFramesPresented;
- mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ if (mIsWarm) {
+ ExtendedTimestamp timestamp; // local
+ status_t status = mOutputSink->getTimestamp(timestamp);
+ if (status == NO_ERROR) {
+ dumpState->mTimestampVerifier.add(
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ mSampleRate);
+ const int64_t totalNativeFramesPresented =
timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
- timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
+ mNativeFramesWrittenButNotPresented =
+ mTotalNativeFramesWritten - totalNativeFramesPresented;
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ // We don't compensate for server - kernel time difference and
+ // only update latency if we have valid info.
+ dumpState->mLatencyMs =
+ (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
+ } else {
+ // HAL reported that more frames were presented than were written
+ mNativeFramesWrittenButNotPresented = 0;
+ status = INVALID_OPERATION;
+ }
} else {
- // HAL reported that more frames were presented than were written
- mNativeFramesWrittenButNotPresented = 0;
- status = INVALID_OPERATION;
+ dumpState->mTimestampVerifier.error();
}
- }
- if (status == NO_ERROR) {
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
- } else {
- // fetch server time if we can't get timestamp
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
- systemTime(SYSTEM_TIME_MONOTONIC);
- // clear out kernel cached position as this may get rapidly stale
- // if we never get a new valid timestamp
- mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+ if (status == NO_ERROR) {
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ } else {
+ // fetch server time if we can't get timestamp
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ systemTime(SYSTEM_TIME_MONOTONIC);
+ // clear out kernel cached position as this may get rapidly stale
+ // if we never get a new valid timestamp
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+ }
}
}
}
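FastMixer now feeds kernel timestamps into a TimestampVerifier and records errors and warm-up discontinuities, which is what the new latency and "Timestamp stats" dump lines report. A sketch of that flow in isolation, assuming the audio_utils TimestampVerifier header provides the add/error/discontinuity/toString calls used here; the frame positions and times are made up:

    #define LOG_TAG "TimestampVerifierExample"
    #include <audio_utils/TimestampVerifier.h>
    #include <utils/Log.h>

    void exampleTimestampFlow(uint32_t sampleRate) {
        android::TimestampVerifier<int64_t /* frames */, int64_t /* time ns */> verifier;
        verifier.discontinuity();                                  // e.g. thread not warm yet
        verifier.add(/* frames */ 0,   /* timeNs */ 1000000000, sampleRate);
        verifier.add(/* frames */ 960, /* timeNs */ 1020000000, sampleRate);  // ~20 ms later
        // verifier.error();                                       // a failed HAL getTimestamp()
        ALOGI("timestamp stats: %s", verifier.toString().c_str());
    }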
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 235d23f..1c86d9a 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -22,6 +22,7 @@
#include "StateQueue.h"
#include "FastMixerState.h"
#include "FastMixerDumpState.h"
+#include "NBAIO_Tee.h"
namespace android {
@@ -32,7 +33,9 @@
class FastMixer : public FastThread {
public:
- FastMixer();
+ /** FastMixer constructor takes as param the parent MixerThread's io handle (id)
+ for purposes of identification. */
+ explicit FastMixer(audio_io_handle_t threadIoHandle);
virtual ~FastMixer();
FastMixerStateQueue* sq();
@@ -87,6 +90,11 @@
// accessed without lock between multiple threads.
std::atomic_bool mMasterMono;
std::atomic_int_fast64_t mBoottimeOffset;
+
+ const audio_io_handle_t mThreadIoHandle; // parent thread id for debugging purposes
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
}; // class FastMixer
} // namespace android
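The per-thread NBAIO_Tee member replaces the old shared tee-sink plumbing. Based only on the calls visible in this patch (set, setId, write and the static dumpAll), a typical lifecycle looks roughly like this; the id string, buffer and fd are placeholders:

    #ifdef TEE_SINK
    #include <string>
    #include "NBAIO_Tee.h"

    // Inside namespace android.
    void exampleTeeUsage(const NBAIO_Format& format, const void* mixBuffer,
                         size_t frameCount, int dumpFd) {
        NBAIO_Tee tee;
        tee.set(format, NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);  // (re)create the pipe for this format
        tee.setId(std::string("_13_F"));                     // becomes part of the .wav filename
        tee.write(mixBuffer, frameCount);                    // duplicate each mixed buffer
        NBAIO_Tee::dumpAll(dumpFd, "_DUMP");                 // later, from dumpsys: flush to files
    }
    #endif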
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 2e4fb8c..2abfbfb 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -19,11 +19,13 @@
#include "Configuration.h"
#ifdef FAST_THREAD_STATISTICS
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
+#include <json/json.h>
+#include <string>
#include <utils/Debug.h>
#include <utils/Log.h>
#include "FastMixerDumpState.h"
@@ -68,21 +70,22 @@
dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
" numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
" sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
- " mixPeriod=%.2f ms\n",
+ " mixPeriod=%.2f ms latency=%.2f ms\n",
FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
mNumTracks, mWriteErrors, mUnderruns, mOverruns,
mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
- mixPeriodSec * 1e3);
+ mixPeriodSec * 1e3, mLatencyMs);
+ dprintf(fd, " FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
#ifdef FAST_THREAD_STATISTICS
// find the interval of valid samples
- uint32_t bounds = mBounds;
- uint32_t newestOpen = bounds & 0xFFFF;
+ const uint32_t bounds = mBounds;
+ const uint32_t newestOpen = bounds & 0xFFFF;
uint32_t oldestClosed = bounds >> 16;
//uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
uint32_t n;
__builtin_sub_overflow(newestOpen, oldestClosed, &n);
- n = n & 0xFFFF;
+ n &= 0xFFFF;
if (n > mSamplingN) {
ALOGE("too many samples %u", n);
@@ -90,9 +93,9 @@
}
// statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
// and adjusted CPU load in MHz normalized for CPU clock frequency
- CentralTendencyStatistics wall, loadNs;
+ audio_utils::Statistics<double> wall, loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
- CentralTendencyStatistics kHz, loadMHz;
+ audio_utils::Statistics<double> kHz, loadMHz;
uint32_t previousCpukHz = 0;
#endif
// Assuming a normal distribution for cycle times, three standard deviations on either side of
@@ -107,18 +110,18 @@
if (tail != NULL) {
tail[j] = wallNs;
}
- wall.sample(wallNs);
+ wall.add(wallNs);
uint32_t sampleLoadNs = mLoadNs[i];
- loadNs.sample(sampleLoadNs);
+ loadNs.add(sampleLoadNs);
#ifdef CPU_FREQUENCY_STATISTICS
uint32_t sampleCpukHz = mCpukHz[i];
// skip bad kHz samples
if ((sampleCpukHz & ~0xF) != 0) {
- kHz.sample(sampleCpukHz >> 4);
+ kHz.add(sampleCpukHz >> 4);
if (sampleCpukHz == previousCpukHz) {
double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
double adjMHz = megacycles / mixPeriodSec; // _not_ wallNs * 1e9
- loadMHz.sample(adjMHz);
+ loadMHz.add(adjMHz);
}
}
previousCpukHz = sampleCpukHz;
@@ -126,42 +129,42 @@
}
if (n) {
dprintf(fd, " Simple moving statistics over last %.1f seconds:\n",
- wall.n() * mixPeriodSec);
+ wall.getN() * mixPeriodSec);
dprintf(fd, " wall clock time in ms per mix cycle:\n"
" mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
- wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
- wall.stddev()*1e-6);
+ wall.getMean()*1e-6, wall.getMin()*1e-6, wall.getMax()*1e-6,
+ wall.getStdDev()*1e-6);
dprintf(fd, " raw CPU load in us per mix cycle:\n"
" mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
- loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
- loadNs.stddev()*1e-3);
+ loadNs.getMean()*1e-3, loadNs.getMin()*1e-3, loadNs.getMax()*1e-3,
+ loadNs.getStdDev()*1e-3);
} else {
dprintf(fd, " No FastMixer statistics available currently\n");
}
#ifdef CPU_FREQUENCY_STATISTICS
dprintf(fd, " CPU clock frequency in MHz:\n"
" mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
- kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
+ kHz.getMean()*1e-3, kHz.getMin()*1e-3, kHz.getMax()*1e-3, kHz.getStdDev()*1e-3);
dprintf(fd, " adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
" mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
- loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
+ loadMHz.getMean(), loadMHz.getMin(), loadMHz.getMax(), loadMHz.getStdDev());
#endif
if (tail != NULL) {
qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
// assume same number of tail samples on each side, left and right
uint32_t count = n / kTailDenominator;
- CentralTendencyStatistics left, right;
+ audio_utils::Statistics<double> left, right;
for (uint32_t i = 0; i < count; ++i) {
- left.sample(tail[i]);
- right.sample(tail[n - (i + 1)]);
+ left.add(tail[i]);
+ right.add(tail[n - (i + 1)]);
}
dprintf(fd, " Distribution of mix cycle times in ms for the tails "
"(> ~3 stddev outliers):\n"
" left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
" right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
- left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
- right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
- right.stddev()*1e-6);
+ left.getMean()*1e-6, left.getMin()*1e-6, left.getMax()*1e-6, left.getStdDev()*1e-6,
+ right.getMean()*1e-6, right.getMin()*1e-6, right.getMax()*1e-6,
+ right.getStdDev()*1e-6);
delete[] tail;
}
#endif
@@ -203,4 +206,49 @@
}
}
+Json::Value FastMixerDumpState::getJsonDump() const
+{
+ Json::Value root(Json::objectValue);
+ if (mCommand == FastMixerState::INITIAL) {
+ root["status"] = "uninitialized";
+ return root;
+ }
+#ifdef FAST_THREAD_STATISTICS
+ // find the interval of valid samples
+ const uint32_t bounds = mBounds;
+ const uint32_t newestOpen = bounds & 0xFFFF;
+ uint32_t oldestClosed = bounds >> 16;
+
+ //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+ uint32_t n;
+ __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+ n &= 0xFFFF;
+
+ if (n > mSamplingN) {
+ ALOGE("too many samples %u", n);
+ n = mSamplingN;
+ }
+ // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
+ // and adjusted CPU load in MHz normalized for CPU clock frequency
+ Json::Value jsonWall(Json::arrayValue);
+ Json::Value jsonLoadNs(Json::arrayValue);
+ // loop over all the samples
+ for (uint32_t j = 0; j < n; ++j) {
+ size_t i = oldestClosed++ & (mSamplingN - 1);
+ uint32_t wallNs = mMonotonicNs[i];
+ jsonWall.append(wallNs);
+ uint32_t sampleLoadNs = mLoadNs[i];
+ jsonLoadNs.append(sampleLoadNs);
+ }
+ if (n) {
+ root["wall_clock_time_ns"] = jsonWall;
+ root["raw_cpu_load_ns"] = jsonLoadNs;
+ root["status"] = "ok";
+ } else {
+ root["status"] = "unavailable";
+ }
+#endif
+ return root;
+}
+
} // android
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 8ef31d1..69c2e4e 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -18,6 +18,9 @@
#define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
#include <stdint.h>
+#include <string>
+#include <audio_utils/TimestampVerifier.h>
+#include <json/json.h>
#include "Configuration.h"
#include "FastThreadDumpState.h"
#include "FastMixerState.h"
@@ -64,8 +67,10 @@
FastMixerDumpState();
/*virtual*/ ~FastMixerDumpState();
- void dump(int fd) const; // should only be called on a stable copy, not the original
+ void dump(int fd) const; // should only be called on a stable copy, not the original
+ Json::Value getJsonDump() const; // should only be called on a stable copy, not the original
+ double mLatencyMs = 0.; // measured latency, default of 0 if no valid timestamp read.
uint32_t mWriteSequence; // incremented before and after each write()
uint32_t mFramesWritten; // total number of frames written successfully
uint32_t mNumTracks; // total number of active fast tracks
@@ -74,6 +79,9 @@
size_t mFrameCount;
uint32_t mTrackMask; // mask of active tracks
FastTrackDump mTracks[FastMixerState::kMaxFastTracks];
+
+ // For timestamp statistics.
+ TimestampVerifier<int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
};
} // android
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index 36d8eef..b98842d 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -35,7 +35,7 @@
FastMixerState::FastMixerState() : FastThreadState(),
// mFastTracks
mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0),
- mFrameCount(0), mTeeSink(NULL)
+ mFrameCount(0)
{
int ok = pthread_once(&sMaxFastTracksOnce, sMaxFastTracksInit);
if (ok != 0) {
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 2be1e91..c7fcbd8 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -77,9 +77,6 @@
WRITE = 0x10, // write to output sink
MIX_WRITE = 0x18; // mix tracks and write to output sink
- // This might be a one-time configuration rather than per-state
- NBAIO_Sink* mTeeSink; // if non-NULL, then duplicate write()s to this non-blocking sink
-
// never returns NULL; asserts if command is invalid
static const char *commandToString(Command command);
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index 6f546c3..968d5aa 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -28,6 +28,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
audio_session_t sessionId,
+ bool isOut,
uid_t uid,
pid_t pid,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
@@ -39,8 +40,9 @@
audio_session_t triggerSession);
virtual void stop();
virtual bool isFastTrack() const { return false; }
+ bool isDirect() const override { return true; }
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
// protected by MMapThread::mLock
diff --git a/services/audioflinger/NBAIO_Tee.cpp b/services/audioflinger/NBAIO_Tee.cpp
new file mode 100644
index 0000000..53083d5
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.cpp
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "NBAIO_Tee"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <deque>
+#include <dirent.h>
+#include <future>
+#include <list>
+#include <vector>
+
+#include <audio_utils/format.h>
+#include <audio_utils/sndfile.h>
+#include <media/nbaio/PipeReader.h>
+
+#include "Configuration.h"
+#include "NBAIO_Tee.h"
+
+// Enabled with TEE_SINK in Configuration.h
+#ifdef TEE_SINK
+
+namespace android {
+
+/*
+ Tee filenames generated as follows:
+
+ "aftee_Date_ThreadId_C_reason.wav" RecordThread
+ "aftee_Date_ThreadId_M_reason.wav" MixerThread (Normal)
+ "aftee_Date_ThreadId_F_reason.wav" MixerThread (Fast)
+ "aftee_Date_ThreadId_TrackId_R_reason.wav" RecordTrack
+ "aftee_Date_ThreadId_TrackId_TrackName_T_reason.wav" PlaybackTrack
+
+ where Date = YYYYmmdd_HHMMSS_MSEC
+
+ where Reason = [ DTOR | DUMP | REMOVE ]
+
+ Examples:
+ aftee_20180424_153811_038_13_57_2_T_REMOVE.wav
+ aftee_20180424_153811_218_13_57_2_T_REMOVE.wav
+ aftee_20180424_153811_378_13_57_2_T_REMOVE.wav
+ aftee_20180424_153825_147_62_C_DUMP.wav
+ aftee_20180424_153825_148_62_59_R_DUMP.wav
+ aftee_20180424_153825_149_13_F_DUMP.wav
+ aftee_20180424_153842_125_62_59_R_REMOVE.wav
+ aftee_20180424_153842_168_62_C_DTOR.wav
+*/
+
+static constexpr char DEFAULT_PREFIX[] = "aftee_";
+static constexpr char DEFAULT_DIRECTORY[] = "/data/misc/audioserver";
+static constexpr size_t DEFAULT_THREADPOOL_SIZE = 8;
+
+/** AudioFileHandler manages temporary audio wav files with a least recently created
+ retention policy.
+
+ The temporary filenames are systematically generated. A common filename prefix,
+ storage directory, and concurrency pool are passed in on creating the object.
+
+ Temporary files are created by "create", which returns a filename generated by
+
+ prefix + 14 char date + suffix
+
+ TODO Move to audio_utils.
+ TODO Avoid pointing two AudioFileHandlers to the same directory and prefix
+ as we don't have a prefix specific lock file. */
+
+class AudioFileHandler {
+public:
+
+ AudioFileHandler(const std::string &prefix, const std::string &directory, size_t pool)
+ : mThreadPool(pool)
+ , mPrefix(prefix)
+ {
+ (void)setDirectory(directory);
+ }
+
+ /** returns filename of created audio file, else empty string on failure. */
+ std::string create(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &suffix);
+
+private:
+ /** sets the current directory. this is currently private to avoid confusion
+ when changing while pending operations are occurring (it's okay, but
+ weakly synchronized). */
+ status_t setDirectory(const std::string &directory);
+
+ /** cleans current directory and returns the directory name done. */
+ status_t clean(std::string *dir = nullptr);
+
+ /** creates an audio file from a reader functor passed in. */
+ status_t createInternal(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &filename);
+
+ static bool isDirectoryValid(const std::string &directory) {
+ return directory.size() > 0 && directory[0] == '/';
+ }
+
+ std::string generateFilename(const std::string &suffix) const {
+ char fileTime[sizeof("YYYYmmdd_HHMMSS_\0")];
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ LOG_ALWAYS_FATAL_IF(strftime(fileTime, sizeof(fileTime), "%Y%m%d_%H%M%S_", &tm) == 0,
+ "incorrect fileTime buffer");
+ char msec[4];
+ (void)snprintf(msec, sizeof(msec), "%03d", (int)(tv.tv_usec / 1000));
+ return mPrefix + fileTime + msec + suffix + ".wav";
+ }
+
+ bool isManagedFilename(const char *name) {
+        constexpr size_t FILENAME_LEN_DATE = 4 + 2 + 2 // %Y%m%d
+            + 1 + 2 + 2 + 2 // _%H%M%S
+            + 1 + 3; // _MSEC
+ const size_t prefixLen = mPrefix.size();
+ const size_t nameLen = strlen(name);
+
+ // reject on size, prefix, and .wav
+ if (nameLen < prefixLen + FILENAME_LEN_DATE + 4 /* .wav */
+ || strncmp(name, mPrefix.c_str(), prefixLen) != 0
+ || strcmp(name + nameLen - 4, ".wav") != 0) {
+ return false;
+ }
+
+ // validate date portion
+ const char *date = name + prefixLen;
+ return std::all_of(date, date + 8, isdigit)
+ && date[8] == '_'
+ && std::all_of(date + 9, date + 15, isdigit)
+ && date[15] == '_'
+ && std::all_of(date + 16, date + 19, isdigit);
+ }
+
+ // yet another ThreadPool implementation.
+ class ThreadPool {
+ public:
+ ThreadPool(size_t size)
+ : mThreadPoolSize(size)
+ { }
+
+ /** launches task "name" with associated function "func".
+            if the threadpool is exhausted, the task runs synchronously on the calling thread. */
+ status_t launch(const std::string &name, std::function<status_t()> func);
+
+ private:
+ std::mutex mLock;
+ std::list<std::pair<
+ std::string, std::future<status_t>>> mFutures; // GUARDED_BY(mLock)
+
+ const size_t mThreadPoolSize;
+ } mThreadPool;
+
+ const std::string mPrefix;
+ std::mutex mLock;
+ std::string mDirectory; // GUARDED_BY(mLock)
+ std::deque<std::string> mFiles; // GUARDED_BY(mLock) sorted list of files by creation time
+
+ static constexpr size_t FRAMES_PER_READ = 1024;
+ static constexpr size_t MAX_FILES_READ = 1024;
+ static constexpr size_t MAX_FILES_KEEP = 32;
+};
+
+/* static */
+void NBAIO_Tee::NBAIO_TeeImpl::dumpTee(
+ int fd, const NBAIO_SinkSource &sinkSource, const std::string &suffix)
+{
+ // Singleton. Constructed thread-safe on first call, never destroyed.
+ static AudioFileHandler audioFileHandler(
+ DEFAULT_PREFIX, DEFAULT_DIRECTORY, DEFAULT_THREADPOOL_SIZE);
+
+ auto &source = sinkSource.second;
+ if (source.get() == nullptr) {
+ return;
+ }
+
+ const NBAIO_Format format = source->format();
+ bool firstRead = true;
+ std::string filename = audioFileHandler.create(
+            // this functor must not hold references to the stack
+ [firstRead, sinkSource] (void *buffer, size_t frames) mutable {
+ auto &source = sinkSource.second;
+ ssize_t actualRead = source->read(buffer, frames);
+ if (actualRead == (ssize_t)OVERRUN && firstRead) {
+ // recheck once
+ actualRead = source->read(buffer, frames);
+ }
+ firstRead = false;
+ return actualRead;
+ },
+ Format_sampleRate(format),
+ Format_channelCount(format),
+ format.mFormat,
+ suffix);
+
+ if (fd >= 0 && filename.size() > 0) {
+ dprintf(fd, "tee wrote to %s\n", filename.c_str());
+ }
+}
+
+/* static */
+NBAIO_Tee::NBAIO_TeeImpl::NBAIO_SinkSource NBAIO_Tee::NBAIO_TeeImpl::makeSinkSource(
+ const NBAIO_Format &format, size_t frames, bool *enabled)
+{
+ if (Format_isValid(format) && audio_is_linear_pcm(format.mFormat)) {
+ Pipe *pipe = new Pipe(frames, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ if (index != 0) {
+ ALOGW("pipe failure to negotiate: %zd", index);
+ goto exit;
+ }
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ if (index != 0) {
+ ALOGW("pipeReader failure to negotiate: %zd", index);
+ goto exit;
+ }
+ if (enabled != nullptr) *enabled = true;
+ return {pipe, pipeReader};
+ }
+exit:
+ if (enabled != nullptr) *enabled = false;
+ return {nullptr, nullptr};
+}
+
+std::string AudioFileHandler::create(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &suffix)
+{
+ const std::string filename = generateFilename(suffix);
+
+ if (mThreadPool.launch(std::string("create ") + filename,
+ [=]() { return createInternal(reader, sampleRate, channelCount, format, filename); })
+ == NO_ERROR) {
+ return filename;
+ }
+ return "";
+}
+
+status_t AudioFileHandler::setDirectory(const std::string &directory)
+{
+ if (!isDirectoryValid(directory)) return BAD_VALUE;
+
+ // TODO: consider using std::filesystem in C++17
+ DIR *dir = opendir(directory.c_str());
+
+ if (dir == nullptr) {
+ ALOGW("%s: cannot open directory %s", __func__, directory.c_str());
+ return BAD_VALUE;
+ }
+
+ size_t toRemove = 0;
+ decltype(mFiles) files;
+
+ while (files.size() < MAX_FILES_READ) {
+ errno = 0;
+ const struct dirent *result = readdir(dir);
+ if (result == nullptr) {
+ ALOGW_IF(errno != 0, "%s: readdir failure %s", __func__, strerror(errno));
+ break;
+ }
+ // is it a managed filename?
+ if (!isManagedFilename(result->d_name)) {
+ continue;
+ }
+ files.emplace_back(result->d_name);
+ }
+ (void)closedir(dir);
+
+    // OPTIMIZATION: we don't need to stat each file; the filenames are
+    // already (roughly) ordered by creation date. We use std::deque instead
+    // of std::set for faster insertion and sorting times.
+
+ if (files.size() > MAX_FILES_KEEP) {
+ // removed files can use a partition (no need to do a full sort).
+ toRemove = files.size() - MAX_FILES_KEEP;
+ std::nth_element(files.begin(), files.begin() + toRemove - 1, files.end());
+ }
+
+ // kept files must be sorted.
+ std::sort(files.begin() + toRemove, files.end());
+
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+
+ mDirectory = directory;
+ mFiles = std::move(files);
+ }
+
+ if (toRemove > 0) { // launch a clean in background.
+ (void)mThreadPool.launch(
+ std::string("cleaning ") + directory, [this]() { return clean(); });
+ }
+ return NO_ERROR;
+}
+
+status_t AudioFileHandler::clean(std::string *directory)
+{
+ std::vector<std::string> filesToRemove;
+ std::string dir;
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+
+ if (!isDirectoryValid(mDirectory)) return NO_INIT;
+
+ dir = mDirectory;
+ if (mFiles.size() > MAX_FILES_KEEP) {
+ size_t toRemove = mFiles.size() - MAX_FILES_KEEP;
+
+ // use move and erase to efficiently transfer std::string
+ std::move(mFiles.begin(),
+ mFiles.begin() + toRemove,
+ std::back_inserter(filesToRemove));
+ mFiles.erase(mFiles.begin(), mFiles.begin() + toRemove);
+ }
+ }
+
+ std::string dirp = dir + "/";
+ // remove files outside of lock for better concurrency.
+ for (const auto &file : filesToRemove) {
+ (void)unlink((dirp + file).c_str());
+ }
+
+ // return the directory if requested.
+ if (directory != nullptr) {
+ *directory = dir;
+ }
+ return NO_ERROR;
+}
+
+status_t AudioFileHandler::ThreadPool::launch(
+ const std::string &name, std::function<status_t()> func)
+{
+ if (mThreadPoolSize > 1) {
+ std::lock_guard<std::mutex> _l(mLock);
+ if (mFutures.size() >= mThreadPoolSize) {
+ for (auto it = mFutures.begin(); it != mFutures.end();) {
+ const std::string &filename = it->first;
+ std::future<status_t> &future = it->second;
+ if (!future.valid() ||
+ future.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
+ ALOGV("%s: future %s ready", __func__, filename.c_str());
+ it = mFutures.erase(it);
+ } else {
+ ALOGV("%s: future %s not ready", __func__, filename.c_str());
+ ++it;
+ }
+ }
+ }
+ if (mFutures.size() < mThreadPoolSize) {
+ ALOGV("%s: deferred calling %s", __func__, name.c_str());
+ mFutures.emplace_back(name, std::async(std::launch::async, func));
+ return NO_ERROR;
+ }
+ }
+ ALOGV("%s: immediate calling %s", __func__, name.c_str());
+ return func();
+}
+
+status_t AudioFileHandler::createInternal(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &filename)
+{
+ // Attempt to choose the best matching file format.
+ // We can choose any sf_format
+ // but writeFormat must be one of 16, 32, float
+ // due to sf_writef compatibility.
+ int sf_format;
+ audio_format_t writeFormat;
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ sf_format = SF_FORMAT_PCM_16;
+ writeFormat = AUDIO_FORMAT_PCM_16_BIT;
+ ALOGV("%s: %s using PCM_16 for format %#x", __func__, filename.c_str(), format);
+ break;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ sf_format = SF_FORMAT_PCM_32;
+ writeFormat = AUDIO_FORMAT_PCM_32_BIT;
+ ALOGV("%s: %s using PCM_32 for format %#x", __func__, filename.c_str(), format);
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ sf_format = SF_FORMAT_FLOAT;
+ writeFormat = AUDIO_FORMAT_PCM_FLOAT;
+ ALOGV("%s: %s using PCM_FLOAT for format %#x", __func__, filename.c_str(), format);
+ break;
+ default:
+ // TODO:
+ // handle audio_has_proportional_frames() formats.
+ // handle compressed formats as single byte files.
+ return BAD_VALUE;
+ }
+
+ std::string directory;
+ status_t status = clean(&directory);
+ if (status != NO_ERROR) return status;
+ std::string dirPrefix = directory + "/";
+
+ const std::string path = dirPrefix + filename;
+
+ /* const */ SF_INFO info = {
+ .frames = 0,
+ .samplerate = (int)sampleRate,
+ .channels = (int)channelCount,
+ .format = SF_FORMAT_WAV | sf_format,
+ };
+ SNDFILE *sf = sf_open(path.c_str(), SFM_WRITE, &info);
+ if (sf == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ size_t total = 0;
+ void *buffer = malloc(FRAMES_PER_READ * std::max(
+ channelCount * audio_bytes_per_sample(writeFormat), //output framesize
+ channelCount * audio_bytes_per_sample(format))); // input framesize
+ if (buffer == nullptr) {
+ sf_close(sf);
+ return NO_MEMORY;
+ }
+
+ for (;;) {
+ const ssize_t actualRead = reader(buffer, FRAMES_PER_READ);
+ if (actualRead <= 0) {
+ break;
+ }
+
+ // Convert input format to writeFormat as needed.
+ if (format != writeFormat) {
+ memcpy_by_audio_format(
+ buffer, writeFormat, buffer, format, actualRead * info.channels);
+ }
+
+ ssize_t reallyWritten;
+ switch (writeFormat) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ reallyWritten = sf_writef_short(sf, (const int16_t *)buffer, actualRead);
+ break;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ reallyWritten = sf_writef_int(sf, (const int32_t *)buffer, actualRead);
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ reallyWritten = sf_writef_float(sf, (const float *)buffer, actualRead);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("%s: %s writeFormat: %#x", __func__, filename.c_str(), writeFormat);
+ break;
+ }
+
+ if (reallyWritten < 0) {
+ ALOGW("%s: %s write error: %zd", __func__, filename.c_str(), reallyWritten);
+ break;
+ }
+ total += reallyWritten;
+ if (reallyWritten < actualRead) {
+ ALOGW("%s: %s write short count: %zd < %zd",
+ __func__, filename.c_str(), reallyWritten, actualRead);
+ break;
+ }
+ }
+ sf_close(sf);
+ free(buffer);
+ if (total == 0) {
+ (void)unlink(path.c_str());
+ return NOT_ENOUGH_DATA;
+ }
+
+ // Success: add our name to managed files.
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ // weak synchronization - only update mFiles if the directory hasn't changed.
+ if (mDirectory == directory) {
+ mFiles.emplace_back(filename); // add to the end to preserve sort.
+ }
+ }
+    return NO_ERROR;
+}
+
+} // namespace android
+
+#endif // TEE_SINK
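A minimal standalone sketch, with hypothetical names and only the C++ standard library, of the deferred-launch pattern that AudioFileHandler::ThreadPool::launch implements above: keep a bounded list of futures, reap the ones whose tasks have finished, and fall back to running the task on the calling thread when the pool is exhausted.

    #include <cstddef>
    #include <chrono>
    #include <functional>
    #include <future>
    #include <list>
    #include <mutex>

    class BoundedAsyncPool {
    public:
        explicit BoundedAsyncPool(size_t size) : mSize(size) {}

        int launch(std::function<int()> func) {
            if (mSize > 1) {
                std::lock_guard<std::mutex> _l(mLock);
                // Reap futures whose tasks have already completed.
                for (auto it = mFutures.begin(); it != mFutures.end();) {
                    if (it->wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
                        it = mFutures.erase(it);
                    } else {
                        ++it;
                    }
                }
                if (mFutures.size() < mSize) {
                    // Slot available: run asynchronously and remember the future.
                    mFutures.emplace_back(std::async(std::launch::async, func));
                    return 0;
                }
            }
            // Pool exhausted (or effectively disabled): run on the calling thread.
            return func();
        }

    private:
        std::mutex mLock;
        std::list<std::future<int>> mFutures;  // guarded by mLock
        const size_t mSize;
    };

The real launch() above additionally tags each future with the task name for logging and returns a status_t; the sketch keeps only the scheduling logic.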
diff --git a/services/audioflinger/NBAIO_Tee.h b/services/audioflinger/NBAIO_Tee.h
new file mode 100644
index 0000000..fed8cc8
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Enabled with TEE_SINK in Configuration.h
+#ifndef ANDROID_NBAIO_TEE_H
+#define ANDROID_NBAIO_TEE_H
+
+#ifdef TEE_SINK
+
+#include <atomic>
+#include <mutex>
+#include <set>
+
+#include <cutils/properties.h>
+#include <media/nbaio/NBAIO.h>
+
+namespace android {
+
+/**
+ * The NBAIO_Tee uses the NBAIO Pipe and PipeReader for nonblocking
+ * data collection, for eventual dump to log files.
+ * See https://source.android.com/devices/audio/debugging for how to
+ * enable by ro.debuggable and af.tee properties.
+ *
+ * The write() into the NBAIO_Tee is therefore nonblocking,
+ * but changing NBAIO_Tee formats with set() cannot be done during a write();
+ * usually the caller already implements this mutual exclusion.
+ *
+ * All other calls except set() vs write() may occur at any time.
+ *
+ * dump() disruption is minimized to the caller since system calls are executed
+ * in an asynchronous thread (when possible).
+ *
+ * Currently the NBAIO_Tee is "hardwired" for AudioFlinger support.
+ *
+ * Some AudioFlinger specific notes:
+ *
+ * 1) Tees capture only linear PCM data.
+ * 2) Tees without any data written are considered empty and do not generate
+ * any output files.
+ * 3) Once a Tee dumps data, it is considered "emptied" and new data
+ *    needs to be written before another Tee file is generated.
+ * 4) Tee file format is
+ *    WAV integer PCM 16 bit for AUDIO_FORMAT_PCM_8_BIT, AUDIO_FORMAT_PCM_16_BIT.
+ *    WAV integer PCM 32 bit for AUDIO_FORMAT_PCM_8_24_BIT, AUDIO_FORMAT_PCM_24_BIT_PACKED,
+ * AUDIO_FORMAT_PCM_32_BIT.
+ * WAV float PCM 32 bit for AUDIO_FORMAT_PCM_FLOAT.
+ *
+ * Input_Thread:
+ * 1) Capture buffer is teed when read from the HAL, before resampling for the AudioRecord
+ * client.
+ *
+ * Output_Thread:
+ * 1) MixerThreads will tee at the FastMixer output (if it has one) or at the
+ * NormalMixer output (if no FastMixer).
+ * 2) DuplicatingThreads do not tee any mixed data. Apply a tee on the downstream OutputTrack
+ * or on the upstream playback Tracks.
+ * 3) DirectThreads and OffloadThreads do not tee any data. The upstream track
+ * (if linear PCM format) may be teed to discover data.
+ * 4) MmapThreads are not supported.
+ *
+ * Tracks:
+ * 1) RecordTracks and playback Tracks tee as data is being written to or
+ * read from the shared client-server track buffer by the associated Threads.
+ * 2) The mechanism is on the AudioBufferProvider release() so large static Track
+ * playback may not show any Tee data depending on when it is released.
+ * 3) When a track becomes inactive, the Thread will trigger a dump.
+ */
+
+class NBAIO_Tee {
+public:
+ /* TEE_FLAG is used in set() and must match the flags for the af.tee property
+ given in https://source.android.com/devices/audio/debugging
+ */
+ enum TEE_FLAG {
+ TEE_FLAG_NONE = 0,
+ TEE_FLAG_INPUT_THREAD = (1 << 0), // treat as a Tee for input (Capture) Threads
+ TEE_FLAG_OUTPUT_THREAD = (1 << 1), // treat as a Tee for output (Playback) Threads
+ TEE_FLAG_TRACK = (1 << 2), // treat as a Tee for tracks (Record and Playback)
+ };
+
+ NBAIO_Tee()
+ : mTee(std::make_shared<NBAIO_TeeImpl>())
+ {
+ getRunningTees().add(mTee);
+ }
+
+ ~NBAIO_Tee() {
+ getRunningTees().remove(mTee);
+ dump(-1, "_DTOR"); // log any data remaining in Tee.
+ }
+
+ /**
+ * \brief set is used for deferred configuration of Tee.
+ *
+ * May be called anytime except concurrently with write().
+ *
+ * \param format NBAIO_Format used to open NBAIO pipes
+ * \param flags (https://source.android.com/devices/audio/debugging)
+ * - TEE_FLAG_NONE to bypass af.tee property checks (default);
+ * - TEE_FLAG_INPUT_THREAD to check af.tee if input thread logging set;
+ * - TEE_FLAG_OUTPUT_THREAD to check af.tee if output thread logging set;
+ * - TEE_FLAG_TRACK to check af.tee if track logging set.
+ * \param frames number of frames to open the NBAIO pipe (set to 0 to use default).
+ *
+ * \return
+ * - NO_ERROR on success (or format unchanged)
+ * - BAD_VALUE if format or flags invalid.
+ * - PERMISSION_DENIED if flags not allowed by af.tee
+ */
+
+ status_t set(const NBAIO_Format &format,
+ TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+ return mTee->set(format, flags, frames);
+ }
+
+ status_t set(uint32_t sampleRate, uint32_t channelCount, audio_format_t format,
+ TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+ return mTee->set(Format_from_SR_C(sampleRate, channelCount, format), flags, frames);
+ }
+
+ /**
+ * \brief write data to the tee.
+ *
+     * This call is lock free (the shared pointer and NBAIO are lock free);
+     * it may be called concurrently with all methods except set().
+ *
+ * \param buffer to write to pipe.
+ * \param frameCount in frames as specified by the format passed to set()
+ */
+
+ void write(const void *buffer, size_t frameCount) const {
+ mTee->write(buffer, frameCount);
+ }
+
+ /** sets Tee id string which identifies the generated file (should be unique). */
+ void setId(const std::string &id) const {
+ mTee->setId(id);
+ }
+
+ /**
+ * \brief dump the audio content written to the Tee.
+ *
+ * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+ * \param reason string suffix to append to the generated file.
+ */
+ void dump(int fd, const std::string &reason = "") const {
+ mTee->dump(fd, reason);
+ }
+
+ /**
+ * \brief dump all Tees currently alive.
+ *
+ * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+ * \param reason string suffix to append to the generated file.
+ */
+ static void dumpAll(int fd, const std::string &reason = "") {
+ getRunningTees().dump(fd, reason);
+ }
+
+private:
+
+ /** The underlying implementation of the Tee - the lifetime is through
+ a shared pointer so destruction of the NBAIO_Tee container may proceed
+ even though dumping is occurring. */
+ class NBAIO_TeeImpl {
+ public:
+ status_t set(const NBAIO_Format &format, TEE_FLAG flags, size_t frames) {
+ static const int teeConfig = property_get_bool("ro.debuggable", false)
+ ? property_get_int32("af.tee", 0) : 0;
+
+ // check the type of Tee
+ const TEE_FLAG type = TEE_FLAG(
+ flags & (TEE_FLAG_INPUT_THREAD | TEE_FLAG_OUTPUT_THREAD | TEE_FLAG_TRACK));
+
+ // parameter flags can't select multiple types.
+ if (__builtin_popcount(type) > 1) {
+ return BAD_VALUE;
+ }
+
+ // if type is set, we check to see if it is permitted by configuration.
+ if (type != 0 && (type & teeConfig) == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ // determine number of frames for Tee
+ if (frames == 0) {
+ // TODO: consider varying frame count based on type.
+ frames = DEFAULT_TEE_FRAMES;
+ }
+
+ // TODO: should we check minimum number of frames?
+
+ // don't do anything if format and frames are the same.
+ if (Format_isEqual(format, mFormat) && frames == mFrames) {
+ return NO_ERROR;
+ }
+
+ bool enabled = false;
+ auto sinksource = makeSinkSource(format, frames, &enabled);
+
+ // enabled is set if makeSinkSource is successful.
+ // Note: as mentioned in NBAIO_Tee::set(), don't call set() while write() is
+ // ongoing.
+ if (enabled) {
+ std::lock_guard<std::mutex> _l(mLock);
+ mFlags = flags;
+ mFormat = format; // could get this from the Sink.
+ mFrames = frames;
+ mSinkSource = std::move(sinksource);
+ mEnabled.store(true);
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
+ }
+
+ void setId(const std::string &id) {
+ std::lock_guard<std::mutex> _l(mLock);
+ mId = id;
+ }
+
+ void dump(int fd, const std::string &reason) {
+ if (!mDataReady.exchange(false)) return;
+ std::string suffix;
+ NBAIO_SinkSource sinkSource;
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ suffix = mId + reason;
+ sinkSource = mSinkSource;
+ }
+ dumpTee(fd, sinkSource, suffix);
+ }
+
+ void write(const void *buffer, size_t frameCount) {
+ if (!mEnabled.load() || frameCount == 0) return;
+ (void)mSinkSource.first->write(buffer, frameCount);
+ mDataReady.store(true);
+ }
+
+ private:
+ // TRICKY: We need to keep the NBAIO_Sink and NBAIO_Source both alive at the same time
+ // because PipeReader holds a naked reference (not a strong or weak pointer) to Pipe.
+ using NBAIO_SinkSource = std::pair<sp<NBAIO_Sink>, sp<NBAIO_Source>>;
+
+ static void dumpTee(int fd, const NBAIO_SinkSource& sinkSource, const std::string& suffix);
+
+ static NBAIO_SinkSource makeSinkSource(
+ const NBAIO_Format &format, size_t frames, bool *enabled);
+
+ // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
+ static constexpr size_t DEFAULT_TEE_FRAMES = 0x200000;
+
+ // atomic status checking
+ std::atomic<bool> mEnabled{false};
+ std::atomic<bool> mDataReady{false};
+
+ // locked dump information
+ mutable std::mutex mLock;
+ std::string mId; // GUARDED_BY(mLock)
+ TEE_FLAG mFlags = TEE_FLAG_NONE; // GUARDED_BY(mLock)
+ NBAIO_Format mFormat = Format_Invalid; // GUARDED_BY(mLock)
+ size_t mFrames = 0; // GUARDED_BY(mLock)
+ NBAIO_SinkSource mSinkSource; // GUARDED_BY(mLock)
+ };
+
+ /** RunningTees tracks current running tees for dump purposes.
+ It is implemented to have minimal locked regions, to be transparent to the caller. */
+ class RunningTees {
+ public:
+ void add(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+ std::lock_guard<std::mutex> _l(mLock);
+ ALOGW_IF(!mTees.emplace(tee).second,
+ "%s: %p already exists in mTees", __func__, tee.get());
+ }
+
+ void remove(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+ std::lock_guard<std::mutex> _l(mLock);
+ ALOGW_IF(mTees.erase(tee) != 1,
+ "%s: %p doesn't exist in mTees", __func__, tee.get());
+ }
+
+ void dump(int fd, const std::string &reason) {
+ std::vector<std::shared_ptr<NBAIO_TeeImpl>> tees; // safe snapshot of tees
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ tees.insert(tees.end(), mTees.begin(), mTees.end());
+ }
+ for (const auto &tee : tees) {
+ tee->dump(fd, reason);
+ }
+ }
+
+ private:
+ std::mutex mLock;
+ std::set<std::shared_ptr<NBAIO_TeeImpl>> mTees; // GUARDED_BY(mLock)
+ };
+
+ // singleton
+ static RunningTees &getRunningTees() {
+ static RunningTees runningTees;
+ return runningTees;
+ }
+
+ // The NBAIO TeeImpl may have lifetime longer than NBAIO_Tee if
+    // RunningTees::dump() is being called simultaneously with ~NBAIO_Tee().
+ // This is allowed for maximum concurrency.
+ const std::shared_ptr<NBAIO_TeeImpl> mTee;
+}; // NBAIO_Tee
+
+} // namespace android
+
+#endif // TEE_SINK
+#endif // !ANDROID_NBAIO_TEE_H
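A hedged standalone sketch, with hypothetical names, of the snapshot-then-dump pattern that RunningTees uses above: entries are held by shared_ptr in a locked set, the set is copied under the lock, and dump() is invoked on the copies with the lock released, so a concurrent remove() or destructor only drops its own reference while the dump keeps the implementation alive.

    #include <memory>
    #include <mutex>
    #include <set>
    #include <vector>

    struct Dumpable {
        virtual ~Dumpable() = default;
        virtual void dump(int fd) = 0;
    };

    class Registry {
    public:
        void add(const std::shared_ptr<Dumpable> &entry) {
            std::lock_guard<std::mutex> _l(mLock);
            mEntries.insert(entry);
        }

        void remove(const std::shared_ptr<Dumpable> &entry) {
            std::lock_guard<std::mutex> _l(mLock);
            mEntries.erase(entry);
        }

        void dumpAll(int fd) {
            // Snapshot under the lock, then call out with the lock released.
            std::vector<std::shared_ptr<Dumpable>> snapshot;
            {
                std::lock_guard<std::mutex> _l(mLock);
                snapshot.assign(mEntries.begin(), mEntries.end());
            }
            for (const auto &entry : snapshot) {
                entry->dump(fd);  // entry stays alive here even if removed concurrently
            }
        }

    private:
        std::mutex mLock;
        std::set<std::shared_ptr<Dumpable>> mEntries;  // guarded by mLock
    };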
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index e5cb8a2..7b165a1 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -24,8 +24,9 @@
#include <audio_utils/primitives.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
#include <media/AudioParameter.h>
+#include <media/PatchBuilder.h>
+#include <mediautils/ServiceUtilities.h>
// ----------------------------------------------------------------------------
@@ -49,111 +50,77 @@
struct audio_port *ports)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->listAudioPorts(num_ports, ports);
- }
- return NO_INIT;
+ return mPatchPanel.listAudioPorts(num_ports, ports);
}
/* Get supported attributes for a given audio port */
status_t AudioFlinger::getAudioPort(struct audio_port *port)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->getAudioPort(port);
- }
- return NO_INIT;
+ return mPatchPanel.getAudioPort(port);
}
-
/* Connect a patch between several source and sink ports */
status_t AudioFlinger::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->createAudioPatch(patch, handle);
- }
- return NO_INIT;
+ return mPatchPanel.createAudioPatch(patch, handle);
}
/* Disconnect a patch */
status_t AudioFlinger::releaseAudioPatch(audio_patch_handle_t handle)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->releaseAudioPatch(handle);
- }
- return NO_INIT;
+ return mPatchPanel.releaseAudioPatch(handle);
}
-
/* List connected audio ports and they attributes */
status_t AudioFlinger::listAudioPatches(unsigned int *num_patches,
struct audio_patch *patches)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->listAudioPatches(num_patches, patches);
+ return mPatchPanel.listAudioPatches(num_patches, patches);
+}
+
+status_t AudioFlinger::PatchPanel::SoftwarePatch::getLatencyMs_l(double *latencyMs) const
+{
+ const auto& iter = mPatchPanel.mPatches.find(mPatchHandle);
+ if (iter != mPatchPanel.mPatches.end()) {
+ return iter->second.getLatencyMs(latencyMs);
+ } else {
+ return BAD_VALUE;
}
- return NO_INIT;
-}
-
-/* Set audio port configuration */
-status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
-{
- Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->setAudioPortConfig(config);
- }
- return NO_INIT;
-}
-
-
-AudioFlinger::PatchPanel::PatchPanel(const sp<AudioFlinger>& audioFlinger)
- : mAudioFlinger(audioFlinger)
-{
-}
-
-AudioFlinger::PatchPanel::~PatchPanel()
-{
}
/* List connected audio ports and their attributes */
status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
struct audio_port *ports __unused)
{
- ALOGV("listAudioPorts");
+ ALOGV(__func__);
return NO_ERROR;
}
/* Get supported attributes for a given audio port */
status_t AudioFlinger::PatchPanel::getAudioPort(struct audio_port *port __unused)
{
- ALOGV("getAudioPort");
+ ALOGV(__func__);
return NO_ERROR;
}
-
/* Connect a patch between several source and sink ports */
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- status_t status = NO_ERROR;
- audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
if (handle == NULL || patch == NULL) {
return BAD_VALUE;
}
- ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
- patch->num_sources, patch->num_sinks, *handle);
- if (audioflinger == 0) {
- return NO_INIT;
- }
+ ALOGV("%s() num_sources %d num_sinks %d handle %d",
+ __func__, patch->num_sources, patch->num_sinks, *handle);
+ status_t status = NO_ERROR;
+ audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- (patch->num_sinks == 0 && patch->num_sources != 2) ||
- patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
return BAD_VALUE;
}
// limit number of sources to 1 for now or 2 sources for special cross hw module case.
@@ -163,81 +130,73 @@
}
if (*handle != AUDIO_PATCH_HANDLE_NONE) {
- for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
- if (*handle == mPatches[index]->mHandle) {
- ALOGV("createAudioPatch() removing patch handle %d", *handle);
- halHandle = mPatches[index]->mHalHandle;
- Patch *removedPatch = mPatches[index];
- // free resources owned by the removed patch if applicable
- // 1) if a software patch is present, release the playback and capture threads and
- // tracks created. This will also release the corresponding audio HAL patches
- if ((removedPatch->mRecordPatchHandle
- != AUDIO_PATCH_HANDLE_NONE) ||
- (removedPatch->mPlaybackPatchHandle !=
- AUDIO_PATCH_HANDLE_NONE)) {
- clearPatchConnections(removedPatch);
- }
- // 2) if the new patch and old patch source or sink are devices from different
- // hw modules, clear the audio HAL patches now because they will not be updated
- // by call to create_audio_patch() below which will happen on a different HW module
- if (halHandle != AUDIO_PATCH_HANDLE_NONE) {
- audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
- if ((removedPatch->mAudioPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE) &&
- ((patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE) ||
- (removedPatch->mAudioPatch.sources[0].ext.device.hw_module !=
- patch->sources[0].ext.device.hw_module))) {
- hwModule = removedPatch->mAudioPatch.sources[0].ext.device.hw_module;
- } else if ((patch->num_sinks == 0) ||
- ((removedPatch->mAudioPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
- ((patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) ||
- (removedPatch->mAudioPatch.sinks[0].ext.device.hw_module !=
- patch->sinks[0].ext.device.hw_module)))) {
- // Note on (patch->num_sinks == 0): this situation should not happen as
- // these special patches are only created by the policy manager but just
- // in case, systematically clear the HAL patch.
- // Note that removedPatch->mAudioPatch.num_sinks cannot be 0 here because
- // halHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
- hwModule = removedPatch->mAudioPatch.sinks[0].ext.device.hw_module;
- }
- if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
- if (index >= 0) {
- sp<DeviceHalInterface> hwDevice =
- audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
- hwDevice->releaseAudioPatch(halHandle);
- }
- }
- }
- mPatches.removeAt(index);
- delete removedPatch;
- break;
+ auto iter = mPatches.find(*handle);
+ if (iter != mPatches.end()) {
+ ALOGV("%s() removing patch handle %d", __func__, *handle);
+ Patch &removedPatch = iter->second;
+ // free resources owned by the removed patch if applicable
+ // 1) if a software patch is present, release the playback and capture threads and
+ // tracks created. This will also release the corresponding audio HAL patches
+ if (removedPatch.isSoftware()) {
+ removedPatch.clearConnections(this);
}
+ // 2) if the new patch and old patch source or sink are devices from different
+ // hw modules, clear the audio HAL patches now because they will not be updated
+ // by call to create_audio_patch() below which will happen on a different HW module
+ if (removedPatch.mHalHandle != AUDIO_PATCH_HANDLE_NONE) {
+ audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
+ const struct audio_patch &oldPatch = removedPatch.mAudioPatch;
+ if (oldPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE &&
+ (patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE ||
+ oldPatch.sources[0].ext.device.hw_module !=
+ patch->sources[0].ext.device.hw_module)) {
+ hwModule = oldPatch.sources[0].ext.device.hw_module;
+ } else if (patch->num_sinks == 0 ||
+ (oldPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
+ (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE ||
+ oldPatch.sinks[0].ext.device.hw_module !=
+ patch->sinks[0].ext.device.hw_module))) {
+ // Note on (patch->num_sinks == 0): this situation should not happen as
+ // these special patches are only created by the policy manager but just
+ // in case, systematically clear the HAL patch.
+ // Note that removedPatch.mAudioPatch.num_sinks cannot be 0 here because
+ // removedPatch.mHalHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
+ hwModule = oldPatch.sinks[0].ext.device.hw_module;
+ }
+ sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(hwModule);
+ if (hwDevice != 0) {
+ hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
+ }
+ }
+ mPatches.erase(iter);
+ removeSoftwarePatchFromInsertedModules(*handle);
}
}
- Patch *newPatch = new Patch(patch);
+ Patch newPatch{*patch};
+ audio_module_handle_t insertedModule = AUDIO_MODULE_HANDLE_NONE;
switch (patch->sources[0].type) {
case AUDIO_PORT_TYPE_DEVICE: {
audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+ AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(srcModule);
+ if (!audioHwDevice) {
status = BAD_VALUE;
goto exit;
}
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
for (unsigned int i = 0; i < patch->num_sinks; i++) {
// support only one sink if connection to a mix or across HW modules
if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
- patch->sinks[i].ext.mix.hw_module != srcModule) &&
+ (patch->sinks[i].type == AUDIO_PORT_TYPE_DEVICE &&
+ patch->sinks[i].ext.device.hw_module != srcModule)) &&
patch->num_sinks > 1) {
+ ALOGW("%s() multiple sinks for mix or across modules not supported", __func__);
status = INVALID_OPERATION;
goto exit;
}
// reject connection to different sink types
if (patch->sinks[i].type != patch->sinks[0].type) {
- ALOGW("createAudioPatch() different sink types in same patch not supported");
+ ALOGW("%s() different sink types in same patch not supported", __func__);
status = BAD_VALUE;
goto exit;
}
@@ -256,38 +215,52 @@
if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
(patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
patch->sources[1].ext.mix.hw_module)) {
- ALOGW("createAudioPatch() invalid source combination");
+ ALOGW("%s() invalid source combination", __func__);
status = INVALID_OPERATION;
goto exit;
}
sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
- newPatch->mPlaybackThread = (MixerThread *)thread.get();
+ mAudioFlinger.checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() cannot get playback thread");
+ ALOGW("%s() cannot get playback thread", __func__);
status = INVALID_OPERATION;
goto exit;
}
+ // existing playback thread is reused, so it is not closed when patch is cleared
+ newPatch.mPlayback.setThread(
+ reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
audio_devices_t device = patch->sinks[0].ext.device.type;
String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- sp<ThreadBase> thread = audioflinger->openOutput_l(
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+ config.sample_rate = patch->sinks[0].sample_rate;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+ config.channel_mask = patch->sinks[0].channel_mask;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+ config.format = patch->sinks[0].format;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ flags = patch->sinks[0].flags.output;
+ }
+ sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
patch->sinks[0].ext.device.hw_module,
&output,
&config,
device,
address,
- AUDIO_OUTPUT_FLAG_NONE);
- newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
- ALOGV("audioflinger->openOutput_l() returned %p",
- newPatch->mPlaybackThread.get());
- if (newPatch->mPlaybackThread == 0) {
+ flags);
+ ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
+ if (thread == 0) {
status = NO_MEMORY;
goto exit;
}
+ newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
}
audio_devices_t device = patch->sources[0].ext.device.type;
String8 address = String8(patch->sources[0].ext.device.address);
@@ -297,47 +270,53 @@
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
config.sample_rate = patch->sources[0].sample_rate;
} else {
- config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+ config.sample_rate = newPatch.mPlayback.thread()->sampleRate();
}
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
config.channel_mask = patch->sources[0].channel_mask;
} else {
- config.channel_mask =
- audio_channel_in_mask_from_count(newPatch->mPlaybackThread->channelCount());
+ config.channel_mask = audio_channel_in_mask_from_count(
+ newPatch.mPlayback.thread()->channelCount());
}
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
config.format = patch->sources[0].format;
} else {
- config.format = newPatch->mPlaybackThread->format();
+ config.format = newPatch.mPlayback.thread()->format();
}
+ audio_input_flags_t flags =
+ patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ patch->sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
+ sp<ThreadBase> thread = mAudioFlinger.openInput_l(srcModule,
&input,
&config,
device,
address,
AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
- newPatch->mRecordThread = (RecordThread *)thread.get();
- ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
- newPatch->mRecordThread.get(), config.channel_mask);
- if (newPatch->mRecordThread == 0) {
+ flags);
+ ALOGV("mAudioFlinger.openInput_l() returned %p inChannelMask %08x",
+ thread.get(), config.channel_mask);
+ if (thread == 0) {
status = NO_MEMORY;
goto exit;
}
- status = createPatchConnections(newPatch, patch);
+ newPatch.mRecord.setThread(reinterpret_cast<RecordThread*>(thread.get()));
+ status = newPatch.createConnections(this);
if (status != NO_ERROR) {
goto exit;
}
+ if (audioHwDevice->isInsert()) {
+ insertedModule = audioHwDevice->handle();
+ }
} else {
if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
- sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+ sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
+ ALOGW("%s() bad capture I/O handle %d",
+ __func__, patch->sinks[0].ext.mix.handle);
status = BAD_VALUE;
goto exit;
}
@@ -356,9 +335,9 @@
} break;
case AUDIO_PORT_TYPE_MIX: {
audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
+ ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
if (index < 0) {
- ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+ ALOGW("%s() bad src hw module %d", __func__, srcModule);
status = BAD_VALUE;
goto exit;
}
@@ -366,8 +345,8 @@
audio_devices_t type = AUDIO_DEVICE_NONE;
for (unsigned int i = 0; i < patch->num_sinks; i++) {
if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
- ALOGW("createAudioPatch() invalid sink type %d for mix source",
- patch->sinks[i].type);
+ ALOGW("%s() invalid sink type %d for mix source",
+ __func__, patch->sinks[i].type);
status = BAD_VALUE;
goto exit;
}
@@ -379,21 +358,21 @@
type |= patch->sinks[i].ext.device.type;
}
sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+ mAudioFlinger.checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
+ ALOGW("%s() bad playback I/O handle %d",
+ __func__, patch->sources[0].ext.mix.handle);
status = BAD_VALUE;
goto exit;
}
}
- if (thread == audioflinger->primaryPlaybackThread_l()) {
+ if (thread == mAudioFlinger.primaryPlaybackThread_l()) {
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyRouting), (int)type);
- audioflinger->broacastParametersToRecordThreads_l(param.toString());
+ mAudioFlinger.broacastParametersToRecordThreads_l(param.toString());
}
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
@@ -403,295 +382,415 @@
goto exit;
}
exit:
- ALOGV("createAudioPatch() status %d", status);
+ ALOGV("%s() status %d", __func__, status);
if (status == NO_ERROR) {
- *handle = (audio_patch_handle_t) audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
- newPatch->mHandle = *handle;
- newPatch->mHalHandle = halHandle;
- mPatches.add(newPatch);
- ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle);
+ *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
+ newPatch.mHalHandle = halHandle;
+ mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
+ if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
+ addSoftwarePatchToInsertedModules(insertedModule, *handle);
+ }
+ ALOGV("%s() added new patch handle %d halHandle %d", __func__, *handle, halHandle);
} else {
- clearPatchConnections(newPatch);
- delete newPatch;
+ newPatch.clearConnections(this);
}
return status;
}
-status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
- const struct audio_patch *audioPatch)
+AudioFlinger::PatchPanel::Patch::~Patch()
+{
+ ALOGE_IF(isSoftware(), "Software patch connections leaked %d %d",
+ mRecord.handle(), mPlayback.handle());
+}
+
+status_t AudioFlinger::PatchPanel::Patch::createConnections(PatchPanel *panel)
{
// create patch from source device to record thread input
- struct audio_patch subPatch;
- subPatch.num_sources = 1;
- subPatch.sources[0] = audioPatch->sources[0];
- subPatch.num_sinks = 1;
-
- patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]);
- subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC;
-
- status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle);
+ status_t status = panel->createAudioPatch(
+ PatchBuilder().addSource(mAudioPatch.sources[0]).
+ addSink(mRecord.thread(), { .source = AUDIO_SOURCE_MIC }).patch(),
+ mRecord.handlePtr());
if (status != NO_ERROR) {
- patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mRecord.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
return status;
}
// create patch from playback thread output to sink device
- if (audioPatch->num_sinks != 0) {
- patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
- subPatch.sinks[0] = audioPatch->sinks[0];
- status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+ if (mAudioPatch.num_sinks != 0) {
+ status = panel->createAudioPatch(
+ PatchBuilder().addSource(mPlayback.thread()).addSink(mAudioPatch.sinks[0]).patch(),
+ mPlayback.handlePtr());
if (status != NO_ERROR) {
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
return status;
}
} else {
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
}
// use a pseudo LCM between input and output framecount
- size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
+ size_t playbackFrameCount = mPlayback.thread()->frameCount();
int playbackShift = __builtin_ctz(playbackFrameCount);
- size_t recordFramecount = patch->mRecordThread->frameCount();
- int shift = __builtin_ctz(recordFramecount);
+ size_t recordFrameCount = mRecord.thread()->frameCount();
+ int shift = __builtin_ctz(recordFrameCount);
if (playbackShift < shift) {
shift = playbackShift;
}
- size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
- ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
- playbackFrameCount, recordFramecount, frameCount);
+ size_t frameCount = (playbackFrameCount * recordFrameCount) >> shift;
+    ALOGV("%s() playbackFrameCount %zu recordFrameCount %zu frameCount %zu",
+ __func__, playbackFrameCount, recordFrameCount, frameCount);
// create a special record track to capture from record thread
- uint32_t channelCount = patch->mPlaybackThread->channelCount();
+ uint32_t channelCount = mPlayback.thread()->channelCount();
audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
- audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
- uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
- audio_format_t format = patch->mPlaybackThread->format();
+ audio_channel_mask_t outChannelMask = mPlayback.thread()->channelMask();
+ uint32_t sampleRate = mPlayback.thread()->sampleRate();
+ audio_format_t format = mPlayback.thread()->format();
- patch->mPatchRecord = new RecordThread::PatchRecord(
- patch->mRecordThread.get(),
+ audio_format_t inputFormat = mRecord.thread()->format();
+ if (!audio_is_linear_pcm(inputFormat)) {
+ // The playbackThread format will say PCM for IEC61937 packetized stream.
+ // Use recordThread format.
+ format = inputFormat;
+ }
+ audio_input_flags_t inputFlags = mAudioPatch.sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ mAudioPatch.sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
+ if (sampleRate == mRecord.thread()->sampleRate() &&
+ inChannelMask == mRecord.thread()->channelMask() &&
+ mRecord.thread()->fastTrackAvailable() &&
+ mRecord.thread()->hasFastCapture()) {
+ // Create a fast track if the record thread has fast capture to get better performance.
+ // Only enable fast mode when there is no resample needed.
+ inputFlags = (audio_input_flags_t) (inputFlags | AUDIO_INPUT_FLAG_FAST);
+ } else {
+ // Fast mode is not available in this case.
+ inputFlags = (audio_input_flags_t) (inputFlags & ~AUDIO_INPUT_FLAG_FAST);
+ }
+ sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
+ mRecord.thread().get(),
sampleRate,
inChannelMask,
format,
frameCount,
NULL,
(size_t)0 /* bufferSize */,
- AUDIO_INPUT_FLAG_NONE);
- if (patch->mPatchRecord == 0) {
- return NO_MEMORY;
- }
- status = patch->mPatchRecord->initCheck();
+ inputFlags);
+ status = mRecord.checkTrack(tempRecordTrack.get());
if (status != NO_ERROR) {
return status;
}
- patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
+
+ audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
+ audio_stream_type_t streamType = AUDIO_STREAM_PATCH;
+ if (mAudioPatch.num_sources == 2 && mAudioPatch.sources[1].type == AUDIO_PORT_TYPE_MIX) {
+ // "reuse one existing output mix" case
+ streamType = mAudioPatch.sources[1].ext.mix.usecase.stream;
+ }
+ if (mPlayback.thread()->hasFastMixer()) {
+ // Create a fast track if the playback thread has fast mixer to get better performance.
+ outputFlags = (audio_output_flags_t) (outputFlags | AUDIO_OUTPUT_FLAG_FAST);
+ }
// create a special playback track to render to playback thread.
// this track is given the same buffer as the PatchRecord buffer
- patch->mPatchTrack = new PlaybackThread::PatchTrack(
- patch->mPlaybackThread.get(),
- audioPatch->sources[1].ext.mix.usecase.stream,
+ sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
+ mPlayback.thread().get(),
+ streamType,
sampleRate,
outChannelMask,
format,
frameCount,
- patch->mPatchRecord->buffer(),
- patch->mPatchRecord->bufferSize(),
- AUDIO_OUTPUT_FLAG_NONE);
- status = patch->mPatchTrack->initCheck();
+ tempRecordTrack->buffer(),
+ tempRecordTrack->bufferSize(),
+ outputFlags);
+ status = mPlayback.checkTrack(tempPatchTrack.get());
if (status != NO_ERROR) {
return status;
}
- patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
// tie playback and record tracks together
- patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
- patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
+ mRecord.setTrackAndPeer(tempRecordTrack, tempPatchTrack.get());
+ mPlayback.setTrackAndPeer(tempPatchTrack, tempRecordTrack.get());
// start capture and playback
- patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
- patch->mPatchTrack->start();
+ mRecord.track()->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
+ mPlayback.track()->start();
return status;
}
-void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch)
+void AudioFlinger::PatchPanel::Patch::clearConnections(PatchPanel *panel)
{
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return;
+ ALOGV("%s() mRecord.handle %d mPlayback.handle %d",
+ __func__, mRecord.handle(), mPlayback.handle());
+ mRecord.stopTrack();
+ mPlayback.stopTrack();
+ mRecord.closeConnections(panel);
+ mPlayback.closeConnections(panel);
+}
+
+status_t AudioFlinger::PatchPanel::Patch::getLatencyMs(double *latencyMs) const
+{
+ if (!isSoftware()) return INVALID_OPERATION;
+
+ auto recordTrack = mRecord.const_track();
+ if (recordTrack.get() == nullptr) return INVALID_OPERATION;
+
+ auto playbackTrack = mPlayback.const_track();
+ if (playbackTrack.get() == nullptr) return INVALID_OPERATION;
+
+ // Latency information for tracks may be called without obtaining
+ // the underlying thread lock.
+ //
+ // We use record server latency + playback track latency (generally smaller than the
+ // reverse due to internal biases).
+ //
+ // TODO: is this stable enough? Consider a PatchTrack synchronized version of this.
+
+ // For PCM tracks get server latency.
+ if (audio_is_linear_pcm(recordTrack->format())) {
+ double recordServerLatencyMs, playbackTrackLatencyMs;
+ if (recordTrack->getServerLatencyMs(&recordServerLatencyMs) == OK
+ && playbackTrack->getTrackLatencyMs(&playbackTrackLatencyMs) == OK) {
+ *latencyMs = recordServerLatencyMs + playbackTrackLatencyMs;
+ return OK;
+ }
}
- ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d",
- patch->mRecordPatchHandle, patch->mPlaybackPatchHandle);
+ // See if kernel latencies are available.
+ // If so, do a frame diff and time difference computation to estimate
+    // the total patch latency. This requires that the frame counts reported by the
+ // HAL are matched properly in the case of record overruns and playback underruns.
+ ThreadBase::TrackBase::FrameTime recordFT{}, playFT{};
+ recordTrack->getKernelFrameTime(&recordFT);
+ playbackTrack->getKernelFrameTime(&playFT);
+ if (recordFT.timeNs > 0 && playFT.timeNs > 0) {
+ const int64_t frameDiff = recordFT.frames - playFT.frames;
+ const int64_t timeDiffNs = recordFT.timeNs - playFT.timeNs;
- if (patch->mPatchRecord != 0) {
- patch->mPatchRecord->stop();
- }
- if (patch->mPatchTrack != 0) {
- patch->mPatchTrack->stop();
- }
- if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- releaseAudioPatch(patch->mRecordPatchHandle);
- patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- }
- if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- releaseAudioPatch(patch->mPlaybackPatchHandle);
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- }
- if (patch->mRecordThread != 0) {
- if (patch->mPatchRecord != 0) {
- patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
+ // It is possible that the patch track and patch record have a large time disparity because
+ // one thread runs but another is stopped. We arbitrarily choose the maximum timestamp
+ // time difference based on how often we expect the timestamps to update in normal operation
+        // (typically no more than 50 ms).
+ //
+ // If the timestamps aren't sampled close enough, the patch latency is not
+ // considered valid.
+ //
+ // TODO: change this based on more experiments.
+ constexpr int64_t maxValidTimeDiffNs = 200 * NANOS_PER_MILLISECOND;
+ if (std::abs(timeDiffNs) < maxValidTimeDiffNs) {
+ *latencyMs = frameDiff * 1e3 / recordTrack->sampleRate()
+ - timeDiffNs * 1e-6;
+ return OK;
}
- audioflinger->closeInputInternal_l(patch->mRecordThread);
- }
- if (patch->mPlaybackThread != 0) {
- if (patch->mPatchTrack != 0) {
- patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
- }
- // if num sources == 2 we are reusing an existing playback thread so we do not close it
- if (patch->mAudioPatch.num_sources != 2) {
- audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
- }
- }
- if (patch->mRecordThread != 0) {
- if (patch->mPatchRecord != 0) {
- patch->mPatchRecord.clear();
- }
- patch->mRecordThread.clear();
- }
- if (patch->mPlaybackThread != 0) {
- if (patch->mPatchTrack != 0) {
- patch->mPatchTrack.clear();
- }
- patch->mPlaybackThread.clear();
}
+ return INVALID_OPERATION;
+}
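A purely illustrative numeric check of the kernel frame-time estimate above, using assumed figures rather than measured ones: if the record side is 2205 frames ahead of playback at a 44100 Hz sample rate, and the record timestamp was sampled 5 ms later than the playback timestamp, the estimate works out to 50 - 5 = 45 ms.

    #include <cstdio>

    int main() {
        // Assumed values for illustration only.
        const long long frameDiff = 2205;      // recordFT.frames - playFT.frames
        const long long timeDiffNs = 5000000;  // recordFT.timeNs - playFT.timeNs (5 ms)
        const unsigned sampleRate = 44100;
        const double latencyMs = frameDiff * 1e3 / sampleRate - timeDiffNs * 1e-6;
        printf("estimated patch latency: %.2f ms\n", latencyMs);  // prints 45.00
        return 0;
    }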
+
+String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle) const
+{
+ // TODO: Consider table dump form for patches, just like tracks.
+ String8 result = String8::format("Patch %d: thread %p => thread %p",
+ myHandle, mRecord.const_thread().get(), mPlayback.const_thread().get());
+
+ // add latency if it exists
+ double latencyMs;
+ if (getLatencyMs(&latencyMs) == OK) {
+ result.appendFormat(" latency: %.2lf", latencyMs);
+ }
+ return result;
}
/* Disconnect a patch */
status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle)
{
- ALOGV("releaseAudioPatch handle %d", handle);
+ ALOGV("%s handle %d", __func__, handle);
status_t status = NO_ERROR;
- size_t index;
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return NO_INIT;
- }
-
- for (index = 0; index < mPatches.size(); index++) {
- if (handle == mPatches[index]->mHandle) {
- break;
- }
- }
- if (index == mPatches.size()) {
+ auto iter = mPatches.find(handle);
+ if (iter == mPatches.end()) {
return BAD_VALUE;
}
- Patch *removedPatch = mPatches[index];
- mPatches.removeAt(index);
+ Patch &removedPatch = iter->second;
+ const struct audio_patch &patch = removedPatch.mAudioPatch;
- struct audio_patch *patch = &removedPatch->mAudioPatch;
-
- switch (patch->sources[0].type) {
+ const struct audio_port_config &src = patch.sources[0];
+ switch (src.type) {
case AUDIO_PORT_TYPE_DEVICE: {
- audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+ sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(src.ext.device.hw_module);
+ if (hwDevice == 0) {
+ ALOGW("%s() bad src hw module %d", __func__, src.ext.device.hw_module);
status = BAD_VALUE;
break;
}
- if (removedPatch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE ||
- removedPatch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- clearPatchConnections(removedPatch);
+ if (removedPatch.isSoftware()) {
+ removedPatch.clearConnections(this);
break;
}
- if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
- sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
- patch->sinks[0].ext.mix.handle);
+ if (patch.sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+ audio_io_handle_t ioHandle = patch.sinks[0].ext.mix.handle;
+ sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(ioHandle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(ioHandle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
+ ALOGW("%s() bad capture I/O handle %d", __func__, ioHandle);
status = BAD_VALUE;
break;
}
}
- status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
} else {
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
+ status = hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
}
} break;
case AUDIO_PORT_TYPE_MIX: {
- audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+ if (findHwDeviceByModule(src.ext.mix.hw_module) == 0) {
+ ALOGW("%s() bad src hw module %d", __func__, src.ext.mix.hw_module);
status = BAD_VALUE;
break;
}
- sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+ audio_io_handle_t ioHandle = src.ext.mix.handle;
+ sp<ThreadBase> thread = mAudioFlinger.checkPlaybackThread_l(ioHandle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(ioHandle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
+ ALOGW("%s() bad playback I/O handle %d", __func__, ioHandle);
status = BAD_VALUE;
break;
}
}
- status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
} break;
default:
status = BAD_VALUE;
- break;
}
- delete removedPatch;
+ mPatches.erase(iter);
+ removeSoftwarePatchFromInsertedModules(handle);
return status;
}
-
/* List connected audio ports and they attributes */
status_t AudioFlinger::PatchPanel::listAudioPatches(unsigned int *num_patches __unused,
struct audio_patch *patches __unused)
{
- ALOGV("listAudioPatches");
+ ALOGV(__func__);
return NO_ERROR;
}
-/* Set audio port configuration */
-status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
+status_t AudioFlinger::PatchPanel::getDownstreamSoftwarePatches(
+ audio_io_handle_t stream,
+ std::vector<AudioFlinger::PatchPanel::SoftwarePatch> *patches) const
{
- ALOGV("setAudioPortConfig");
-
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return NO_INIT;
+ for (const auto& module : mInsertedModules) {
+ if (module.second.streams.count(stream)) {
+ for (const auto& patchHandle : module.second.sw_patches) {
+ const auto& patch_iter = mPatches.find(patchHandle);
+ if (patch_iter != mPatches.end()) {
+ const Patch &patch = patch_iter->second;
+ patches->emplace_back(*this, patchHandle,
+ patch.mPlayback.const_thread()->id(),
+ patch.mRecord.const_thread()->id());
+ } else {
+ ALOGE("Stale patch handle in the cache: %d", patchHandle);
+ }
+ }
+ return OK;
+ }
}
+ // The stream is not associated with any of the inserted modules.
+ return BAD_VALUE;
+}
- audio_module_handle_t module;
- if (config->type == AUDIO_PORT_TYPE_DEVICE) {
- module = config->ext.device.hw_module;
- } else {
- module = config->ext.mix.hw_module;
+void AudioFlinger::PatchPanel::notifyStreamOpened(
+ AudioHwDevice *audioHwDevice, audio_io_handle_t stream)
+{
+ if (audioHwDevice->isInsert()) {
+ mInsertedModules[audioHwDevice->handle()].streams.insert(stream);
}
+}
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(module);
+void AudioFlinger::PatchPanel::notifyStreamClosed(audio_io_handle_t stream)
+{
+ for (auto& module : mInsertedModules) {
+ module.second.streams.erase(stream);
+ }
+}
+
+AudioHwDevice* AudioFlinger::PatchPanel::findAudioHwDeviceByModule(audio_module_handle_t module)
+{
+ if (module == AUDIO_MODULE_HANDLE_NONE) return nullptr;
+ ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(module);
if (index < 0) {
- ALOGW("setAudioPortConfig() bad hw module %d", module);
- return BAD_VALUE;
+ ALOGW("%s() bad hw module %d", __func__, module);
+ return nullptr;
+ }
+ return mAudioFlinger.mAudioHwDevs.valueAt(index);
+}
+
+sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
+{
+ AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(module);
+ return audioHwDevice ? audioHwDevice->hwDevice() : nullptr;
+}
+
+void AudioFlinger::PatchPanel::addSoftwarePatchToInsertedModules(
+ audio_module_handle_t module, audio_patch_handle_t handle)
+{
+ mInsertedModules[module].sw_patches.insert(handle);
+}
+
+void AudioFlinger::PatchPanel::removeSoftwarePatchFromInsertedModules(
+ audio_patch_handle_t handle)
+{
+ for (auto& module : mInsertedModules) {
+ module.second.sw_patches.erase(handle);
+ }
+}
+
+void AudioFlinger::PatchPanel::dump(int fd) const
+{
+ String8 patchPanelDump;
+ const char *indent = " ";
+
+ // Only dump software patches.
+ bool headerPrinted = false;
+ for (const auto& iter : mPatches) {
+ if (iter.second.isSoftware()) {
+ if (!headerPrinted) {
+ patchPanelDump += "\nSoftware patches:\n";
+ headerPrinted = true;
+ }
+ patchPanelDump.appendFormat("%s%s\n", indent, iter.second.dump(iter.first).string());
+ }
}
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+ headerPrinted = false;
+ for (const auto& module : mInsertedModules) {
+ if (!module.second.streams.empty() || !module.second.sw_patches.empty()) {
+ if (!headerPrinted) {
+ patchPanelDump += "\nTracked inserted modules:\n";
+ headerPrinted = true;
+ }
+ String8 moduleDump = String8::format("Module %d: I/O handles: ", module.first);
+ for (const auto& stream : module.second.streams) {
+ moduleDump.appendFormat("%d ", stream);
+ }
+ moduleDump.append("; SW Patches: ");
+ for (const auto& patch : module.second.sw_patches) {
+ moduleDump.appendFormat("%d ", patch);
+ }
+ patchPanelDump.appendFormat("%s%s\n", indent, moduleDump.string());
+ }
+ }
+
+ if (!patchPanelDump.isEmpty()) {
+ write(fd, patchPanelDump.string(), patchPanelDump.size());
+ }
}
} // namespace android
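For orientation, the lookup that getDownstreamSoftwarePatches() performs over mInsertedModules above can be modeled with plain standard-library containers. The sketch below is a simplified, self-contained illustration only (handles reduced to ints, no AudioFlinger types); it is not the code in this patch.

// Simplified model of PatchPanel::mInsertedModules and the downstream-patch lookup.
// All types here are stand-ins (plain ints) for the audio_* handle types used above.
#include <cstdio>
#include <map>
#include <set>
#include <vector>

struct ModuleConnections {
    std::set<int> streams;     // I/O handles opened on the inserted module
    std::set<int> sw_patches;  // software patch handles driving its outputs
};

// Returns the software patches downstream of 'stream', or an empty vector if
// the stream is not associated with any inserted module.
std::vector<int> downstreamPatches(const std::map<int, ModuleConnections>& modules, int stream) {
    for (const auto& module : modules) {
        if (module.second.streams.count(stream)) {
            return std::vector<int>(module.second.sw_patches.begin(),
                                    module.second.sw_patches.end());
        }
    }
    return {};
}

int main() {
    std::map<int, ModuleConnections> modules;
    modules[10].streams = {21, 22};    // two mixer outputs feed inserted module 10
    modules[10].sw_patches = {5};      // one software patch carries its output downstream
    for (int patch : downstreamPatches(modules, 21)) {
        std::printf("stream 21 -> downstream software patch %d\n", patch);
    }
    return 0;
}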
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index d37c0d3..2d9bd8e 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -19,13 +19,33 @@
#error This header file should only be included from AudioFlinger.h
#endif
-class PatchPanel : public RefBase {
+
+// PatchPanel is concealed within AudioFlinger; their lifetimes are the same.
+class PatchPanel {
public:
+ class SoftwarePatch {
+ public:
+ SoftwarePatch(const PatchPanel &patchPanel, audio_patch_handle_t patchHandle,
+ audio_io_handle_t playbackThreadHandle, audio_io_handle_t recordThreadHandle)
+ : mPatchPanel(patchPanel), mPatchHandle(patchHandle),
+ mPlaybackThreadHandle(playbackThreadHandle),
+ mRecordThreadHandle(recordThreadHandle) {}
+ SoftwarePatch(const SoftwarePatch&) = default;
+ SoftwarePatch& operator=(const SoftwarePatch&) = default;
- class Patch;
+ // Must be called under AudioFlinger::mLock
+ status_t getLatencyMs_l(double *latencyMs) const;
+ audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
+ audio_io_handle_t getPlaybackThreadHandle() const { return mPlaybackThreadHandle; };
+ audio_io_handle_t getRecordThreadHandle() const { return mRecordThreadHandle; };
+ private:
+ const PatchPanel &mPatchPanel;
+ const audio_patch_handle_t mPatchHandle;
+ const audio_io_handle_t mPlaybackThreadHandle;
+ const audio_io_handle_t mRecordThreadHandle;
+ };
- explicit PatchPanel(const sp<AudioFlinger>& audioFlinger);
- virtual ~PatchPanel();
+ explicit PatchPanel(AudioFlinger* audioFlinger) : mAudioFlinger(*audioFlinger) {}
/* List connected audio ports and their attributes */
status_t listAudioPorts(unsigned int *num_ports,
@@ -45,46 +65,144 @@
status_t listAudioPatches(unsigned int *num_patches,
struct audio_patch *patches);
- /* Set audio port configuration */
- status_t setAudioPortConfig(const struct audio_port_config *config);
+ // Retrieves all currently established software patches for a stream
+ // opened on an intermediate module.
+ status_t getDownstreamSoftwarePatches(audio_io_handle_t stream,
+ std::vector<SoftwarePatch> *patches) const;
- status_t createPatchConnections(Patch *patch,
- const struct audio_patch *audioPatch);
- void clearPatchConnections(Patch *patch);
+ // Notifies patch panel about all opened and closed streams.
+ void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream);
+ void notifyStreamClosed(audio_io_handle_t stream);
+
+ void dump(int fd) const;
+
+private:
+ template<typename ThreadType, typename TrackType>
+ class Endpoint {
+ public:
+ Endpoint() = default;
+ Endpoint(Endpoint&& other) { *this = std::move(other); }
+ Endpoint& operator=(Endpoint&& other) {
+ ALOGE_IF(mHandle != AUDIO_PATCH_HANDLE_NONE,
+ "A non empty Patch Endpoint leaked, handle %d", mHandle);
+ *this = other;
+ other.mHandle = AUDIO_PATCH_HANDLE_NONE;
+ return *this;
+ }
+
+ status_t checkTrack(TrackType *trackOrNull) const {
+ if (trackOrNull == nullptr) return NO_MEMORY;
+ return trackOrNull->initCheck();
+ }
+ audio_patch_handle_t handle() const { return mHandle; }
+ sp<ThreadType> thread() { return mThread; }
+ sp<TrackType> track() { return mTrack; }
+ sp<const ThreadType> const_thread() const { return mThread; }
+ sp<const TrackType> const_track() const { return mTrack; }
+
+ void closeConnections(PatchPanel *panel) {
+ if (mHandle != AUDIO_PATCH_HANDLE_NONE) {
+ panel->releaseAudioPatch(mHandle);
+ mHandle = AUDIO_PATCH_HANDLE_NONE;
+ }
+ if (mThread != 0) {
+ if (mTrack != 0) {
+ mThread->deletePatchTrack(mTrack);
+ }
+ if (mCloseThread) {
+ panel->mAudioFlinger.closeThreadInternal_l(mThread);
+ }
+ }
+ }
+ audio_patch_handle_t* handlePtr() { return &mHandle; }
+ void setThread(const sp<ThreadType>& thread, bool closeThread = true) {
+ mThread = thread;
+ mCloseThread = closeThread;
+ }
+ void setTrackAndPeer(const sp<TrackType>& track,
+ ThreadBase::PatchProxyBufferProvider *peer) {
+ mTrack = track;
+ mThread->addPatchTrack(mTrack);
+ mTrack->setPeerProxy(peer);
+ }
+ void stopTrack() { if (mTrack) mTrack->stop(); }
+
+ private:
+ Endpoint(const Endpoint&) = default;
+ Endpoint& operator=(const Endpoint&) = default;
+
+ sp<ThreadType> mThread;
+ bool mCloseThread = true;
+ audio_patch_handle_t mHandle = AUDIO_PATCH_HANDLE_NONE;
+ sp<TrackType> mTrack;
+ };
class Patch {
public:
- explicit Patch(const struct audio_patch *patch) :
- mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE),
- mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE),
- mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {}
- ~Patch() {}
+ explicit Patch(const struct audio_patch &patch) : mAudioPatch(patch) {}
+ ~Patch();
+ Patch(const Patch&) = delete;
+ Patch(Patch&&) = default;
+ Patch& operator=(const Patch&) = delete;
+ Patch& operator=(Patch&&) = default;
+ status_t createConnections(PatchPanel *panel);
+ void clearConnections(PatchPanel *panel);
+ bool isSoftware() const {
+ return mRecord.handle() != AUDIO_PATCH_HANDLE_NONE ||
+ mPlayback.handle() != AUDIO_PATCH_HANDLE_NONE; }
+
+ // returns the latency of the patch (from record to playback).
+ status_t getLatencyMs(double *latencyMs) const;
+
+ String8 dump(audio_patch_handle_t myHandle) const;
+
+ // Note that audio_patch::id is only unique within a HAL module
struct audio_patch mAudioPatch;
- audio_patch_handle_t mHandle;
// handle for audio HAL patch handle present only when the audio HAL version is >= 3.0
- audio_patch_handle_t mHalHandle;
+ audio_patch_handle_t mHalHandle = AUDIO_PATCH_HANDLE_NONE;
// below members are used by a software audio patch connecting a source device from a
// given audio HW module to a sink device on an other audio HW module.
- // playback thread created by createAudioPatch() and released by clearPatchConnections() if
- // no existing playback thread can be used by the software patch
- sp<PlaybackThread> mPlaybackThread;
- // audio track created by createPatchConnections() and released by clearPatchConnections()
- sp<PlaybackThread::PatchTrack> mPatchTrack;
- // record thread created by createAudioPatch() and released by clearPatchConnections()
- sp<RecordThread> mRecordThread;
- // audio record created by createPatchConnections() and released by clearPatchConnections()
- sp<RecordThread::PatchRecord> mPatchRecord;
- // handle for audio patch connecting source device to record thread input.
- // created by createPatchConnections() and released by clearPatchConnections()
- audio_patch_handle_t mRecordPatchHandle;
- // handle for audio patch connecting playback thread output to sink device
- // created by createPatchConnections() and released by clearPatchConnections()
- audio_patch_handle_t mPlaybackPatchHandle;
-
+ // the objects are created by createConnections() and released by clearConnections()
+ // playback thread is created if no existing playback thread can be used
+ // connects playback thread output to sink device
+ Endpoint<PlaybackThread, PlaybackThread::PatchTrack> mPlayback;
+ // connects source device to record thread input
+ Endpoint<RecordThread, RecordThread::PatchRecord> mRecord;
};
-private:
- const wp<AudioFlinger> mAudioFlinger;
- SortedVector <Patch *> mPatches;
+ AudioHwDevice* findAudioHwDeviceByModule(audio_module_handle_t module);
+ sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
+ void addSoftwarePatchToInsertedModules(
+ audio_module_handle_t module, audio_patch_handle_t handle);
+ void removeSoftwarePatchFromInsertedModules(audio_patch_handle_t handle);
+
+ AudioFlinger &mAudioFlinger;
+ std::map<audio_patch_handle_t, Patch> mPatches;
+
+ // This map allows going from a thread to "downstream" software patches
+ // when a processing module is inserted in between. Example:
+ //
+ // from map value.streams map key
+ // [Mixer thread] --> [Virtual output device] --> [Processing module] ---\
+ // [Hardware module] <-- [Physical output device] <-- [S/W Patch] <--/
+ // from map value.sw_patches
+ //
+ // This allows the mixer thread to look up the threads of the software patch
+ // for propagating timing info, parameters, etc.
+ //
+ // The current assumptions are:
+ // 1) The processing module acts as a mixer with several outputs which
+ // represent differently downmixed and / or encoded versions of the same
+ // mixed stream. There is no 1:1 correspondence between the input streams
+ // and the software patches, but rather an N:N correspondence between
+ // a group of streams and a group of patches.
+ // 2) There are only a couple of inserted processing modules in the system,
+ // so when looking for a stream or patch handle we can iterate over
+ // all modules.
+ struct ModuleConnections {
+ std::set<audio_io_handle_t> streams;
+ std::set<audio_patch_handle_t> sw_patches;
+ };
+ std::map<audio_module_handle_t, ModuleConnections> mInsertedModules;
};
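The Endpoint helper declared above relies on a move-only idiom: the patch handle travels with a move and is cleared in the source so the same handle can never be released twice. The stand-alone sketch below illustrates that idiom with simplified stand-in types; it is not the Endpoint class itself.

// Minimal sketch of the move-only "endpoint" idiom: copying is private, moving
// transfers the handle and clears it in the moved-from object.
#include <cstdio>
#include <utility>

constexpr int kHandleNone = 0;

class MoveOnlyEndpoint {
public:
    MoveOnlyEndpoint() = default;
    MoveOnlyEndpoint(MoveOnlyEndpoint&& other) { *this = std::move(other); }
    MoveOnlyEndpoint& operator=(MoveOnlyEndpoint&& other) {
        if (mHandle != kHandleNone) {
            std::fprintf(stderr, "warning: overwriting non-empty endpoint, handle %d\n", mHandle);
        }
        mHandle = other.mHandle;
        other.mHandle = kHandleNone;  // the moved-from endpoint no longer owns the handle
        return *this;
    }
    void setHandle(int handle) { mHandle = handle; }
    int handle() const { return mHandle; }
private:
    MoveOnlyEndpoint(const MoveOnlyEndpoint&) = default;
    MoveOnlyEndpoint& operator=(const MoveOnlyEndpoint&) = default;
    int mHandle = kHandleNone;
};

int main() {
    MoveOnlyEndpoint a;
    a.setHandle(42);
    MoveOnlyEndpoint b = std::move(a);                     // handle travels with the move
    std::printf("a=%d b=%d\n", a.handle(), b.handle());    // prints a=0 b=42
    return 0;
}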
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a78be99..4d5f6b0 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -41,7 +41,7 @@
virtual ~Track();
virtual status_t initCheck() const;
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
@@ -56,6 +56,12 @@
LOG_ALWAYS_FATAL_IF(mName >= 0 && name >= 0,
"%s both old name %d and new name %d are valid", __func__, mName, name);
mName = name;
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId)
+ + "_" + std::to_string(mName)
+ + "_T");
+#endif
}
virtual uint32_t sampleRate() const;
@@ -65,10 +71,12 @@
}
bool isOffloaded() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
- bool isDirect() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ bool isDirect() const override
+ { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
bool isOffloadedOrDirect() const { return (mFlags
& (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
| AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+ bool isStatic() const { return mSharedBuffer.get() != nullptr; }
status_t setParameters(const String8& keyValuePairs);
status_t attachAuxEffect(int EffectId);
@@ -87,6 +95,10 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
+ double bufferLatencyMs() const override {
+ return isStatic() ? 0. : TrackBase::bufferLatencyMs();
+ }
+
// implement volume handling.
media::VolumeShaper::Status applyVolumeShaper(
const sp<media::VolumeShaper::Configuration>& configuration,
@@ -140,7 +152,7 @@
bool isResumePending();
void resumeAck();
void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
- const ExtendedTimestamp &timeStamp);
+ uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
@@ -233,7 +245,7 @@
AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
- bool write(void* data, uint32_t frames);
+ ssize_t write(void* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
@@ -241,6 +253,18 @@
void copyMetadataTo(MetadataInserter& backInserter) const override;
/** Set the metadatas of the upstream tracks. Thread safe. */
void setMetadatas(const SourceMetadatas& metadatas);
+ /** returns client timestamp to the upstream duplicating thread. */
+ ExtendedTimestamp getClientProxyTimestamp() const {
+ // server - kernel difference is not true latency when drained
+ // i.e. mServerProxy->isDrained().
+ ExtendedTimestamp timestamp;
+ (void) mClientProxy->getTimestamp(&timestamp);
+ // On success, the timestamp LOCATION_SERVER and LOCATION_KERNEL
+ // entries will be properly filled. If getTimestamp()
+ // is unsuccessful, then a default initialized timestamp
+ // (with mTimeNs[] filled with -1's) is returned.
+ return timestamp;
+ }
private:
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
@@ -257,6 +281,7 @@
bool mActive;
DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
sp<AudioTrackClientProxy> mClientProxy;
+
/** Attributes of the source tracks.
*
* This member must be accessed with mTrackMetadatasMutex taken.
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index fc2dbbb..b0c9fda 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -51,7 +51,7 @@
bool setOverflow() { bool tmp = mOverflow; mOverflow = true;
return tmp; }
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
void handleSyncStartEvent(const sp<SyncEvent>& event);
@@ -63,6 +63,8 @@
const ExtendedTimestamp &timestamp);
virtual bool isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
+ bool isDirect() const override
+ { return (mFlags & AUDIO_INPUT_FLAG_DIRECT) != 0; }
void setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
bool isSilenced() const { return mSilenced; }
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
deleted file mode 100644
index f45ada1..0000000
--- a/services/audioflinger/ServiceUtilities.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <unistd.h>
-
-#include <binder/PermissionController.h>
-
-namespace android {
-
-extern pid_t getpid_cached;
-bool isTrustedCallingUid(uid_t uid);
-bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
-void finishRecording(const String16& opPackageName, uid_t uid);
-bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
-bool captureHotwordAllowed(pid_t pid, uid_t uid);
-bool settingsAllowed();
-bool modifyAudioRoutingAllowed();
-bool dumpAllowed();
-bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
-}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4ca50d7..b5f61e7 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -23,6 +23,8 @@
#include "Configuration.h"
#include <math.h>
#include <fcntl.h>
+#include <memory>
+#include <string>
#include <linux/futex.h>
#include <sys/stat.h>
#include <sys/syscall.h>
@@ -40,6 +42,7 @@
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
+#include <json/json.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio.h>
@@ -62,8 +65,8 @@
#include "AudioFlinger.h"
#include "FastMixer.h"
#include "FastCapture.h"
-#include "ServiceUtilities.h"
-#include "mediautils/SchedulingPolicyService.h"
+#include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
@@ -71,7 +74,7 @@
#endif
#ifdef DEBUG_CPU_USAGE
-#include <cpustats/CentralTendencyStatistics.h>
+#include <audio_utils/Statistics.h>
#include <cpustats/ThreadCpuUsage.h>
#endif
@@ -200,7 +203,7 @@
// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
-static const size_t kRecordThreadReadOnlyHeapSize = 0x4000;
+static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
// ----------------------------------------------------------------------------
@@ -333,9 +336,9 @@
#ifdef DEBUG_CPU_USAGE
private:
ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns
- CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+ audio_utils::Statistics<double> mWcStats; // statistics on thread CPU usage in wall clock ns
- CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+ audio_utils::Statistics<double> mHzStats; // statistics on thread CPU usage in cycles
int mCpuNum; // thread's current CPU number
int mCpukHz; // frequency of thread's current CPU in kHz
@@ -361,7 +364,7 @@
// record sample for wall clock statistics
if (valid) {
- mWcStats.sample(wcNs);
+ mWcStats.add(wcNs);
}
// get the current CPU number
@@ -380,26 +383,26 @@
// if no change in CPU number or frequency, then record sample for cycle statistics
if (valid && mCpukHz > 0) {
- double cycles = wcNs * cpukHz * 0.000001;
- mHzStats.sample(cycles);
+ const double cycles = wcNs * cpukHz * 0.000001;
+ mHzStats.add(cycles);
}
- unsigned n = mWcStats.n();
+ const unsigned n = mWcStats.getN();
// mCpuUsage.elapsed() is expensive, so don't call it every loop
if ((n & 127) == 1) {
- long long elapsed = mCpuUsage.elapsed();
+ const long long elapsed = mCpuUsage.elapsed();
if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
- double perLoop = elapsed / (double) n;
- double perLoop100 = perLoop * 0.01;
- double perLoop1k = perLoop * 0.001;
- double mean = mWcStats.mean();
- double stddev = mWcStats.stddev();
- double minimum = mWcStats.minimum();
- double maximum = mWcStats.maximum();
- double meanCycles = mHzStats.mean();
- double stddevCycles = mHzStats.stddev();
- double minCycles = mHzStats.minimum();
- double maxCycles = mHzStats.maximum();
+ const double perLoop = elapsed / (double) n;
+ const double perLoop100 = perLoop * 0.01;
+ const double perLoop1k = perLoop * 0.001;
+ const double mean = mWcStats.getMean();
+ const double stddev = mWcStats.getStdDev();
+ const double minimum = mWcStats.getMin();
+ const double maximum = mWcStats.getMax();
+ const double meanCycles = mHzStats.getMean();
+ const double stddevCycles = mHzStats.getStdDev();
+ const double minCycles = mHzStats.getMin();
+ const double maxCycles = mHzStats.getMax();
mCpuUsage.resetElapsed();
mWcStats.reset();
mHzStats.reset();
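As a quick reference, this hunk swaps CentralTendencyStatistics for audio_utils::Statistics. The sketch below uses only the calls visible in this patch (add, getN, getMean, getStdDev, getMin, getMax, reset) and assumes the audio_utils Statistics header included earlier in this file; it is an illustration, not part of the change.

// Hypothetical usage sketch of the statistics API adopted above.
#include <cstdio>
#include <audio_utils/Statistics.h>

static void statisticsExample() {
    audio_utils::Statistics<double> stats;   // default constructed, as for mWcStats/mHzStats
    for (double v : {1.0, 2.0, 4.0}) {
        stats.add(v);                        // replaces CentralTendencyStatistics::sample()
    }
    printf("n=%u mean=%lf stddev=%lf min=%lf max=%lf\n",
            (unsigned)stats.getN(), stats.getMean(), stats.getStdDev(),
            stats.getMin(), stats.getMax());
    stats.reset();                           // start a fresh measurement interval
}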
@@ -769,6 +772,8 @@
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
} else {
if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -783,6 +788,12 @@
if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
+ if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
+ if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, " );
+ if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, " );
if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
@@ -845,6 +856,16 @@
dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
+ // Dump timestamp statistics for the Thread types that support it.
+ if (mType == RECORD
+ || mType == MIXER
+ || mType == DUPLICATING
+ || mType == DIRECT
+ || mType == OFFLOAD) {
+ dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
+ dprintf(fd, " Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
+ }
+
if (locked) {
mLock.unlock();
}
@@ -1519,7 +1540,7 @@
}
}
-void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::ThreadBase::toAudioPortConfig(struct audio_port_config *config)
{
config->type = AUDIO_PORT_TYPE_MIX;
config->ext.mix.handle = mId;
@@ -1571,6 +1592,9 @@
--mBatteryCounter[track->uid()].second;
// mLatestActiveTrack is not cleared even if is the same as track.
mHasChanged = true;
+#ifdef TEE_SINK
+ track->dumpTee(-1 /* fd */, "_REMOVE");
+#endif
return index;
}
@@ -1709,10 +1733,21 @@
if (mOutput->audioHwDev->canSetMasterMute()) {
mMasterMute = false;
}
+ mIsMsdDevice = strcmp(
+ mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
}
readOutputParameters_l();
+ // TODO: We may also match on address as well as device type for
+ // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ if (type == MIXER || type == DIRECT) {
+ mTimestampCorrectedDevices = (audio_devices_t)property_get_int64(
+ "audio.timestamp.corrected_output_devices",
+ (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
+ : AUDIO_DEVICE_NONE));
+ }
+
// ++ operator does not compile
for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_FOR_POLICY_CNT;
stream = (audio_stream_type_t) (stream + 1)) {
@@ -1741,6 +1776,11 @@
mLocalLog.dump(fd, " " /* prefix */, 40 /* lines */);
}
+Json::Value AudioFlinger::PlaybackThread::getJsonDump() const
+{
+ return Json::Value(Json::objectValue);
+}
+
void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
{
String8 result;
@@ -1773,7 +1813,7 @@
if (numtracks) {
dprintf(fd, " of which %zu are active\n", numactive);
result.append(prefix);
- Track::appendDumpHeader(result);
+ mTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks; ++i) {
sp<Track> track = mTracks[i];
if (track != 0) {
@@ -1793,7 +1833,7 @@
result.append(" The following tracks are in the active list but"
" not in the track list\n");
result.append(prefix);
- Track::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
sp<Track> track = mActiveTracks[i];
if (mTracks.indexOf(track) < 0) {
@@ -1880,11 +1920,17 @@
status_t lStatus;
audio_output_flags_t outputFlags = mOutput->flags;
audio_output_flags_t requestedFlags = *flags;
+ uint32_t sampleRate;
+
+ if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
- uint32_t sampleRate = *pSampleRate;
+ sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast mixer is present
if (hasFastMixer()) {
@@ -2303,15 +2349,13 @@
if (track->isExternalTrack()) {
TrackBase::track_state state = track->mState;
mLock.unlock();
- status = AudioSystem::startOutput(mId, track->streamType(),
- track->sessionId());
+ status = AudioSystem::startOutput(track->portId());
mLock.lock();
// abort track was stopped/paused while we released the lock
if (state != track->mState) {
if (status == NO_ERROR) {
mLock.unlock();
- AudioSystem::stopOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::stopOutput(track->portId());
mLock.lock();
}
return INVALID_OPERATION;
@@ -2462,6 +2506,11 @@
Mutex::Autolock _l(mLock);
// reject out of sequence requests
if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
+ // Register discontinuity when HW drain is completed because that can cause
+ // the timestamp frame position to reset to 0 for direct and offload threads.
+ // (Out of sequence requests are ignored, since the discontinuity would be handled
+ // elsewhere, e.g. in flush).
+ mTimestampVerifier.discontinuity();
mDrainSequence &= ~1;
mWaitWorkCV.signal();
}
@@ -2786,15 +2835,13 @@
for (size_t i = 0 ; i < count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
if (track->isExternalTrack()) {
- AudioSystem::stopOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::stopOutput(track->portId());
#ifdef ADD_BATTERY_DATA
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
#endif
if (track->isTerminated()) {
- AudioSystem::releaseOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::releaseOutput(track->portId());
}
}
}
@@ -2850,6 +2897,9 @@
ATRACE_END();
if (framesWritten > 0) {
bytesWritten = framesWritten * mFrameSize;
+#ifdef TEE_SINK
+ mTee.write((char *)mSinkBuffer + offset, framesWritten);
+#endif
} else {
bytesWritten = framesWritten;
}
@@ -3167,6 +3217,17 @@
checkSilentMode_l();
+ // DIRECT and OFFLOAD threads should reset frame count to zero on stop/flush
+ // TODO: add confirmation checks:
+ // 1) Do DIRECT threads with linear PCM format really reset to 0?
+ // 2) Is frame count really valid if not linear pcm?
+ // 3) Are all 64 bits of position returned, not just lowest 32 bits?
+ if (mType == OFFLOAD || mType == DIRECT) {
+ mTimestampVerifier.setDiscontinuityMode(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
+ }
+ audio_utils::Statistics<double> downstreamLatencyStatMs(0.999 /* alpha */);
+ audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+
while (!exitPending())
{
// Log merge requests are performed during AudioFlinger binder transactions, but
@@ -3177,6 +3238,46 @@
Vector< sp<EffectChain> > effectChains;
+ // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
+ //
+ // Note: we access outDevice() outside of mLock.
+ if (isMsdDevice() && (outDevice() & AUDIO_DEVICE_OUT_BUS) != 0) {
+ // Here, we try for the AF lock, but do not block on it as the latency
+ // is more informational.
+ if (mAudioFlinger->mLock.tryLock() == NO_ERROR) {
+ std::vector<PatchPanel::SoftwarePatch> swPatches;
+ double latencyMs;
+ status_t status = INVALID_OPERATION;
+ audio_patch_handle_t downstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ if (mAudioFlinger->mPatchPanel.getDownstreamSoftwarePatches(id(), &swPatches) == OK
+ && swPatches.size() > 0) {
+ status = swPatches[0].getLatencyMs_l(&latencyMs);
+ downstreamPatchHandle = swPatches[0].getPatchHandle();
+ }
+ if (downstreamPatchHandle != lastDownstreamPatchHandle) {
+ downstreamLatencyStatMs.reset();
+ lastDownstreamPatchHandle = downstreamPatchHandle;
+ }
+ if (status == OK) {
+ // verify downstream latency (we assume a max reasonable
+ // latency of 1 second).
+ if (latencyMs >= 0. && latencyMs <= 1000.) {
+ ALOGV("new downstream latency %lf ms", latencyMs);
+ downstreamLatencyStatMs.add(latencyMs);
+ } else {
+ ALOGD("out of range downstream latency %lf ms", latencyMs);
+ }
+ }
+ mAudioFlinger->mLock.unlock();
+ }
+ } else {
+ if (lastDownstreamPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
+ // our device is no longer AUDIO_DEVICE_OUT_BUS, reset patch handle and stats.
+ downstreamLatencyStatMs.reset();
+ lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ }
+ }
+
{ // scope for mLock
Mutex::Autolock _l(mLock);
@@ -3190,16 +3291,51 @@
logString = NULL;
}
+ // Collect timestamp statistics for the Playback Thread types that support it.
+ if (mType == MIXER
+ || mType == DUPLICATING
+ || mType == DIRECT
+ || mType == OFFLOAD) { // no indentation
// Gather the framesReleased counters for all active tracks,
// and associate with the sink frames written out. We need
// this to convert the sink timestamp to the track timestamp.
bool kernelLocationUpdate = false;
- if (mNormalSink != 0) {
- // Note: The DuplicatingThread may not have a mNormalSink.
+ ExtendedTimestamp timestamp; // use private copy to fetch
+ if (mStandby) {
+ mTimestampVerifier.discontinuity();
+ } else if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
+ mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ mSampleRate);
+
+ if (isTimestampCorrectionEnabled()) {
+ ALOGV("TS_BEFORE: %d %lld %lld", id(),
+ (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
+ auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ = correctedTimestamp.mFrames;
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+ = correctedTimestamp.mTimeNs;
+ ALOGV("TS_AFTER: %d %lld %lld", id(),
+ (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
+
+ // Note: Downstream latency only added if timestamp correction enabled.
+ if (downstreamLatencyStatMs.getN() > 0) { // we have latency info.
+ const int64_t newPosition =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ - int64_t(downstreamLatencyStatMs.getMean() * mSampleRate * 1e-3);
+ // prevent retrograde
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = max(
+ newPosition,
+ (mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ - mSuspendedFrames));
+ }
+ }
+
// We always fetch the timestamp here because often the downstream
// sink will block while writing.
- ExtendedTimestamp timestamp; // use private copy to fetch
- (void) mNormalSink->getTimestamp(timestamp);
// We keep track of the last valid kernel position in case we are in underrun
// and the normal mixer period is the same as the fast mixer period, or there
@@ -3228,7 +3364,10 @@
+ mSuspendedFrames; // add frames discarded when suspended
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ } else {
+ mTimestampVerifier.error();
}
+
// mFramesWritten for non-offloaded tracks are contiguous
// even after standby() is called. This is useful for the track frame
// to sink frame mapping.
@@ -3256,10 +3395,12 @@
t->updateTrackFrameInfo(
t->mAudioTrackServerProxy->framesReleased(),
mFramesWritten,
+ mSampleRate,
mTimestamp);
}
}
}
+ } // if (mType ... ) { // no indentation
#if 0
// logFormat example
if (z % 100 == 0) {
@@ -3300,7 +3441,7 @@
continue;
}
- if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||
+ if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
isSuspended()) {
// put audio hardware into standby after short delay
if (shouldStandby_l()) {
@@ -3314,7 +3455,7 @@
mStandby = true;
}
- if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
+ if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
// we're about to wait, flush the binder command buffer
IPCThreadState::self()->flushCommands();
@@ -3777,12 +3918,16 @@
destroyTrack_l(track);
}
-void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::PlaybackThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
config->role = AUDIO_PORT_ROLE_SOURCE;
config->ext.mix.hw_module = mOutput->audioHwDev->handle();
config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -3863,9 +4008,7 @@
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
-#ifdef TEE_SINK
- NBAIO_Format origformat = format;
-#endif
+
// adjust format to match that of the Fast Mixer
ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
@@ -3877,7 +4020,7 @@
MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
const NBAIO_Format offers[1] = {format};
size_t numCounterOffers = 0;
-#if !LOG_NDEBUG || defined(TEE_SINK)
+#if !LOG_NDEBUG
ssize_t index =
#else
(void)
@@ -3888,25 +4031,8 @@
(monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
mPipeSink = monoPipe;
-#ifdef TEE_SINK
- if (mTeeSinkOutputEnabled) {
- // create a Pipe to archive a copy of FastMixer's output for dumpsys
- Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, origformat);
- const NBAIO_Format offers2[1] = {origformat};
- numCounterOffers = 0;
- index = teeSink->negotiate(offers2, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSink = teeSink;
- PipeReader *teeSource = new PipeReader(*teeSink);
- numCounterOffers = 0;
- index = teeSource->negotiate(offers2, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSource = teeSource;
- }
-#endif
-
// create fast mixer and configure it initially with just one fast track for our submix
- mFastMixer = new FastMixer();
+ mFastMixer = new FastMixer(mId);
FastMixerStateQueue *sq = mFastMixer->sq();
#ifdef STATE_QUEUE_DUMP
sq->setObserverDump(&mStateQueueObserverDump);
@@ -3932,9 +4058,6 @@
state->mColdFutexAddr = &mFastMixerFutex;
state->mColdGen++;
state->mDumpState = &mFastMixerDumpState;
-#ifdef TEE_SINK
- state->mTeeSink = mTeeSink.get();
-#endif
mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer");
state->mNBLogWriter = mFastMixerNBLogWriter.get();
sq->end();
@@ -3943,7 +4066,7 @@
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
stream()->setHalThreadPriority(kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
@@ -3952,9 +4075,14 @@
mAudioWatchdog->setDump(&mAudioWatchdogDump);
mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
tid = mAudioWatchdog->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
#endif
-
+ } else {
+#ifdef TEE_SINK
+ // Only use the MixerThread tee if there is no FastMixer.
+ mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mId) + "_M");
+#endif
}
switch (kUseFastMixer) {
@@ -4270,6 +4398,37 @@
mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
+ // DeferredOperations handles statistics after setting mixerStatus.
+ class DeferredOperations {
+ public:
+ DeferredOperations(mixer_state *mixerStatus)
+ : mMixerStatus(mixerStatus) { }
+
+ // when leaving scope, tally frames properly.
+ ~DeferredOperations() {
+ // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
+ // because that is when the underrun occurs.
+ // We do not distinguish between FastTracks and NormalTracks here.
+ if (*mMixerStatus == MIXER_TRACKS_READY) {
+ for (const auto &underrun : mUnderrunFrames) {
+ underrun.first->mAudioTrackServerProxy->tallyUnderrunFrames(
+ underrun.second);
+ }
+ }
+ }
+
+ // tallyUnderrunFrames() is called to update the track counters
+ // with the number of underrun frames for a particular mixer period.
+ // We defer tallying until we know the final mixer status.
+ void tallyUnderrunFrames(sp<Track> track, size_t underrunFrames) {
+ mUnderrunFrames.emplace_back(track, underrunFrames);
+ }
+
+ private:
+ const mixer_state * const mMixerStatus;
+ std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
+ } deferredOperations(&mixerStatus); // implicit nested scope for variable capture
+
for (size_t i=0 ; i<count ; i++) {
const sp<Track> t = mActiveTracks[i];
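The DeferredOperations helper introduced above postpones underrun accounting until the final mixer status is known when the object leaves scope. Below is a minimal self-contained model of the same idiom, using simplified stand-in types rather than the AudioFlinger classes.

// Self-contained model of deferred underrun tallying: counts are recorded
// during the loop but only applied at scope exit, once the status is final.
#include <cstdio>
#include <utility>
#include <vector>

enum mixer_state { MIXER_IDLE, MIXER_TRACKS_READY };

struct FakeTrack { int underruns = 0; };

class DeferredTally {
public:
    explicit DeferredTally(const mixer_state* status) : mStatus(status) {}
    ~DeferredTally() {
        if (*mStatus == MIXER_TRACKS_READY) {   // only count underruns while actually mixing
            for (auto& p : mPending) p.first->underruns += p.second;
        }
    }
    void tally(FakeTrack* track, int frames) { mPending.emplace_back(track, frames); }
private:
    const mixer_state* const mStatus;
    std::vector<std::pair<FakeTrack*, int>> mPending;
};

int main() {
    FakeTrack track;
    mixer_state status = MIXER_IDLE;
    {
        DeferredTally deferred(&status);
        deferred.tally(&track, 960);    // recorded now ...
        status = MIXER_TRACKS_READY;    // ... applied at scope exit, once the status is final
    }
    std::printf("underrun frames: %d\n", track.underruns);  // prints 960
    return 0;
}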
@@ -4304,13 +4463,14 @@
track->mObservedUnderruns = underruns;
// don't count underruns that occur while stopping or pausing
// or stopped which can occur when flush() is called while active
+ size_t underrunFrames = 0;
if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
recentUnderruns > 0) {
// FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
- track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
- } else {
- track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+ underrunFrames = recentUnderruns * mFrameCount;
}
+ // Immediately account for FastTrack underruns.
+ track->mAudioTrackServerProxy->tallyUnderrunFrames(underrunFrames);
// This is similar to the state machine for normal tracks,
// with a few modifications for fast tracks.
@@ -4725,13 +4885,13 @@
mixerStatus = MIXER_TRACKS_READY;
}
} else {
+ size_t underrunFrames = 0;
if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
ALOGV("track(%p) underrun, framesReady(%zu) < framesDesired(%zd)",
track, framesReady, desiredFrames);
- track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
- } else {
- track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+ underrunFrames = desiredFrames;
}
+ deferredOperations.tallyUnderrunFrames(track, underrunFrames);
// clear effect chain input buffer if an active track underruns to avoid sending
// previous audio buffer again to effects
@@ -5030,6 +5190,12 @@
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
+ const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs != 0.) {
+ dprintf(fd, " NormalMixer latency ms: %.2lf\n", latencyMs);
+ } else {
+ dprintf(fd, " NormalMixer latency ms: unavail\n");
+ }
if (hasFastMixer()) {
dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
@@ -5038,9 +5204,8 @@
// while we are dumping it. It may be inconsistent, but it won't mutate!
// This is a large object so we place it on the heap.
// FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
- const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+ const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
copy->dump(fd);
- delete copy;
#ifdef STATE_QUEUE_DUMP
// Similar for state queue
@@ -5061,12 +5226,22 @@
} else {
dprintf(fd, " No FastMixer\n");
}
+}
-#ifdef TEE_SINK
- // Write the tee output to a .wav file
- dumpTee(fd, mTeeSource, mId, 'M');
-#endif
-
+Json::Value AudioFlinger::MixerThread::getJsonDump() const
+{
+ Json::Value root;
+ if (hasFastMixer()) {
+ // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
+ root["fastmixer_stats"] = copy->getJsonDump();
+ } else {
+ root["fastmixer_stats"] = "no_fastmixer";
+ }
+ return root;
}
uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -5568,6 +5743,7 @@
mOutput->flush();
mHwPaused = false;
mFlushPending = false;
+ mTimestampVerifier.discontinuity(); // DIRECT and OFFLOADED flush resets frame count.
}
int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -5902,6 +6078,14 @@
track->presentationComplete(framesWritten, audioHALFrames);
track->reset();
tracksToRemove->add(track);
+ // DIRECT and OFFLOADED stop resets frame counts.
+ if (!mUseAsyncWrite) {
+ // If we don't get explicit drain notification we must
+ // register discontinuity regardless of whether this is
+ // the previous (!last) or the upcoming (last) track
+ // to avoid skipping the discontinuity.
+ mTimestampVerifier.discontinuity();
+ }
}
} else {
// No buffers for this track. Give it a few chances to
@@ -6067,7 +6251,22 @@
ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
{
for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->write(mSinkBuffer, writeFrames);
+ const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
+
+ // Consider the first OutputTrack for timestamp and frame counting.
+
+ // The threadLoop() generally assumes writing a full sink buffer size at a time.
+ // Here, we correct for writeFrames of 0 (a stop) or underruns because
+ // we always claim success.
+ if (i == 0) {
+ const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
+ ALOGD_IF(correction != 0 && writeFrames != 0,
+ "%s: writeFrames:%u actualWritten:%zd correction:%zd mFramesWritten:%lld",
+ __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
+ mFramesWritten -= correction;
+ }
+
+ // TODO: Report correction for the other output tracks and show in the dump.
}
mStandby = false;
return (ssize_t)mSinkBufferSize;
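The correction logic above keeps mFramesWritten consistent when the first OutputTrack accepts fewer frames than a full sink buffer. A small worked example with hypothetical numbers (not taken from the patch):

// Worked example of the frame-count correction, with plain ints.
#include <cstdio>

int main() {
    const int sinkBufferSize = 3840;   // bytes, hypothetical
    const int frameSize = 4;           // bytes per frame (16-bit stereo), hypothetical
    long long framesWritten = 10000;   // running count, already advanced by a full buffer

    const int actualWritten = 800;     // OutputTrack accepted fewer frames than a full buffer
    const int correction = sinkBufferSize / frameSize - actualWritten;  // 960 - 800 = 160
    framesWritten -= correction;       // keep the timestamp frame count honest

    std::printf("correction=%d framesWritten=%lld\n", correction, framesWritten);
    return 0;
}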
@@ -6232,9 +6431,6 @@
audio_devices_t outDevice,
audio_devices_t inDevice,
bool systemReady
-#ifdef TEE_SINK
- , const sp<NBAIO_Sink>& teeSink
-#endif
) :
ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
mInput(input),
@@ -6242,9 +6438,6 @@
mRsmpInBuffer(NULL),
// mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
mRsmpInRear(0)
-#ifdef TEE_SINK
- , mTeeSink(teeSink)
-#endif
, mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
"RecordThreadRO", MemoryHeapBase::READ_ONLY))
// mFastCapture below
@@ -6261,8 +6454,20 @@
snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
+ if (mInput != nullptr && mInput->audioHwDev != nullptr) {
+ mIsMsdDevice = strcmp(
+ mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
+ }
+
readInputParameters_l();
+ // TODO: We may also match on address as well as device type for
+ // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
+ mTimestampCorrectedDevices = (audio_devices_t)property_get_int64(
+ "audio.timestamp.corrected_input_devices",
+ (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
+ : AUDIO_DEVICE_NONE));
+
// create an NBAIO source for the HAL input stream, and negotiate
mInputSource = new AudioStreamInSource(input->stream);
size_t numCounterOffers = 0;
@@ -6359,7 +6564,7 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
stream()->setHalThreadPriority(kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
@@ -6367,6 +6572,10 @@
mFastTrackAvail = true;
}
+#ifdef TEE_SINK
+ mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mId) + "_C");
+#endif
failed: ;
// FIXME mNormalSource
@@ -6545,7 +6754,7 @@
}
// sleep if there are no active tracks to process
- if (activeTracks.size() == 0) {
+ if (activeTracks.isEmpty()) {
if (sleepUs == 0) {
sleepUs = kRecordThreadSleepUs;
}
@@ -6596,6 +6805,14 @@
}
didModify = true;
}
+ AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
+ reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
+ if (state->mFastPatchRecordBufferProvider != abp) {
+ state->mFastPatchRecordBufferProvider = abp;
+ state->mFastPatchRecordFormat = fastTrack == 0 ?
+ AUDIO_FORMAT_INVALID : fastTrack->format();
+ didModify = true;
+ }
sq->end(didModify);
if (didModify) {
sq->push(block);
@@ -6621,8 +6838,7 @@
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
- size_t framesToRead = mBufferSize / mFrameSize;
- framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
+ size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
// The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
// to the full buffer point (clearing the overflow condition). Upon OVERRUN error,
@@ -6684,8 +6900,24 @@
// Update server timestamp with kernel stats
if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
int64_t position, time;
- int ret = mInput->stream->getCapturePosition(&position, &time);
- if (ret == NO_ERROR) {
+ if (mStandby) {
+ mTimestampVerifier.discontinuity();
+ } else if (mInput->stream->getCapturePosition(&position, &time) == NO_ERROR
+ && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
+
+ mTimestampVerifier.add(position, time, mSampleRate);
+
+ // Correct timestamps
+ if (isTimestampCorrectionEnabled()) {
+ ALOGV("TS_BEFORE: %d %lld %lld",
+ id(), (long long)time, (long long)position);
+ auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
+ position = correctedTimestamp.mFrames;
+ time = correctedTimestamp.mTimeNs;
+ ALOGV("TS_AFTER: %d %lld %lld",
+ id(), (long long)time, (long long)position);
+ }
+
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
// Note: In general record buffers should tend to be empty in
@@ -6693,6 +6925,8 @@
//
// Also, it is not advantageous to call get_presentation_position during the read
// as the read obtains a lock, preventing the timestamp call from executing.
+ } else {
+ mTimestampVerifier.error();
}
}
// Use this to track timestamp information
@@ -6708,10 +6942,11 @@
goto unlock;
}
ALOG_ASSERT(framesRead > 0);
+ mFramesRead += framesRead;
- if (mTeeSink != 0) {
- (void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
- }
+#ifdef TEE_SINK
+ (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
+#endif
// If destination is non-contiguous, we now correct for reading past end of buffer.
{
size_t part1 = mRsmpInFramesP2 - rear;
@@ -6769,9 +7004,33 @@
framesOut = min(framesOut,
destinationFramesPossible(
framesIn, mSampleRate, activeTrack->mSampleRate));
- // process frames from the RecordThread buffer provider to the RecordTrack buffer
- framesOut = activeTrack->mRecordBufferConverter->convert(
- activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
+
+ if (activeTrack->isDirect()) {
+ // No RecordBufferConverter used for compressed formats. Pass
+ // straight from RecordThread buffer to RecordTrack buffer.
+ AudioBufferProvider::Buffer buffer;
+ buffer.frameCount = framesOut;
+ status_t status = activeTrack->mResamplerBufferProvider->getNextBuffer(&buffer);
+ if (status == OK && buffer.frameCount != 0) {
+ ALOGV_IF(buffer.frameCount != framesOut,
+ "%s() read less than expected (%zu vs %zu)",
+ __func__, buffer.frameCount, framesOut);
+ framesOut = buffer.frameCount;
+ memcpy(activeTrack->mSink.raw, buffer.raw, buffer.frameCount);
+ activeTrack->mResamplerBufferProvider->releaseBuffer(&buffer);
+ } else {
+ framesOut = 0;
+ ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
+ __func__, status, buffer.frameCount);
+ }
+ } else {
+ // process frames from the RecordThread buffer provider to the RecordTrack
+ // buffer
+ framesOut = activeTrack->mRecordBufferConverter->convert(
+ activeTrack->mSink.raw,
+ activeTrack->mResamplerBufferProvider,
+ framesOut);
+ }
if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
overrun = OVERRUN_FALSE;
@@ -6942,6 +7201,12 @@
goto Exit;
}
+ if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
+ ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
@@ -7143,8 +7408,10 @@
// see previously buffered data before it called start(), but with greater risk of overrun.
recordTrack->mResamplerBufferProvider->reset();
- // clear any converter state as new data will be discontinuous
- recordTrack->mRecordBufferConverter->reset();
+ if (!recordTrack->isDirect()) {
+ // clear any converter state as new data will be discontinuous
+ recordTrack->mRecordBufferConverter->reset();
+ }
recordTrack->mState = TrackBase::STARTING_2;
// signal thread to start
mWaitWorkCV.broadcast();
@@ -7300,7 +7567,8 @@
audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
input, flags, inputFlagsToString(flags).c_str());
- if (mActiveTracks.size() == 0) {
+ dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
+ if (mActiveTracks.isEmpty()) {
dprintf(fd, " No active record clients\n");
}
@@ -7309,6 +7577,14 @@
(void)input->stream->dump(fd);
}
+ const double latencyMs = audio_is_linear_pcm(mFormat)
+ ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
+ if (latencyMs != 0.) {
+ dprintf(fd, " NormalRecord latency ms: %.2lf\n", latencyMs);
+ } else {
+ dprintf(fd, " NormalRecord latency ms: unavail\n");
+ }
+
dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
@@ -7316,9 +7592,8 @@
// while we are dumping it. It may be inconsistent, but it won't mutate!
// This is a large object so we place it on the heap.
// FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
- const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+ std::unique_ptr<FastCaptureDumpState> copy(new FastCaptureDumpState(mFastCaptureDumpState));
copy->dump(fd);
- delete copy;
}
void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -7332,7 +7607,7 @@
if (numtracks) {
dprintf(fd, " of which %zu are active\n", numactive);
result.append(prefix);
- RecordTrack::appendDumpHeader(result);
+ mTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<RecordTrack> track = mTracks[i];
if (track != 0) {
@@ -7352,7 +7627,7 @@
result.append(" The following tracks are in the active list but"
" not in the track list\n");
result.append(prefix);
- RecordTrack::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
sp<RecordTrack> track = mActiveTracks[i];
if (mTracks.indexOf(track) < 0) {
@@ -7647,10 +7922,15 @@
{
status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
- mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
- LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
mFormat = mHALFormat;
- LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+ mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+ if (audio_is_linear_pcm(mFormat)) {
+ LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
+ mChannelCount, FCC_8);
+ } else {
+ // Can have more than FCC_8 channels in encoded streams.
+ ALOGI("HAL format %#x is not linear pcm", mFormat);
+ }
result = mInput->stream->getFrameSize(&mFrameSize);
LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
result = mInput->stream->getBufferSize(&mBufferSize);
@@ -7755,7 +8035,7 @@
status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
{
// only one chain per input thread
- if (mEffectChains.size() != 0) {
+ if (!mEffectChains.isEmpty()) {
ALOGW("addEffectChain_l() already one chain %p on thread %p", chain.get(), this);
return INVALID_OPERATION;
}
@@ -7860,24 +8140,28 @@
return status;
}
-void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::addPatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
mTracks.add(record);
}
-void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::deletePatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
destroyTrack_l(record);
}
-void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::RecordThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
config->role = AUDIO_PORT_ROLE_SINK;
config->ext.mix.hw_module = mInput->audioHwDev->handle();
config->ext.mix.usecase.source = mAudioSource;
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -7964,7 +8248,7 @@
}
// This will decrement references and may cause the destruction of this thread.
if (isOutput()) {
- AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ AudioSystem::releaseOutput(mPortId);
} else {
AudioSystem::releaseInput(mPortId);
}
@@ -8078,7 +8362,7 @@
bool silenced = false;
if (isOutput()) {
- ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
+ ret = AudioSystem::startOutput(portId);
} else {
ret = AudioSystem::startInput(portId, &silenced);
}
@@ -8087,10 +8371,10 @@
// abort if start is rejected by audio policy manager
if (ret != NO_ERROR) {
ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
- if (mActiveTracks.size() != 0) {
+ if (!mActiveTracks.isEmpty()) {
mLock.unlock();
if (isOutput()) {
- AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ AudioSystem::releaseOutput(portId);
} else {
AudioSystem::releaseInput(portId);
}
@@ -8113,7 +8397,7 @@
// Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
- client.clientUid, client.clientPid, portId);
+ isOutput(), client.clientUid, client.clientPid, portId);
track->setSilenced_l(silenced);
mActiveTracks.add(track);
@@ -8162,8 +8446,8 @@
mLock.unlock();
if (isOutput()) {
- AudioSystem::stopOutput(mId, streamType(), track->sessionId());
- AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+ AudioSystem::stopOutput(track->portId());
+ AudioSystem::releaseOutput(track->portId());
} else {
AudioSystem::stopInput(track->portId());
AudioSystem::releaseInput(track->portId());
@@ -8188,7 +8472,7 @@
if (mHalStream == 0) {
return NO_INIT;
}
- if (mActiveTracks.size() != 0) {
+ if (!mActiveTracks.isEmpty()) {
return INVALID_OPERATION;
}
mHalStream->standby();
@@ -8459,9 +8743,9 @@
return status;
}
-void AudioFlinger::MmapThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::MmapThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
if (isOutput()) {
config->role = AUDIO_PORT_ROLE_SOURCE;
config->ext.mix.hw_module = mAudioHwDev->handle();
@@ -8626,7 +8910,7 @@
dprintf(fd, " Attributes: content type %d usage %d source %d\n",
mAttr.content_type, mAttr.usage, mAttr.source);
dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
- if (mActiveTracks.size() == 0) {
+ if (mActiveTracks.isEmpty()) {
dprintf(fd, " No active clients\n");
}
}
@@ -8639,7 +8923,7 @@
const char *prefix = " ";
if (numtracks) {
result.append(prefix);
- MmapTrack::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<MmapTrack> track = mActiveTracks[i];
result.append(prefix);
@@ -8845,6 +9129,15 @@
}
}
+void AudioFlinger::MmapPlaybackThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
+}
+
void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
MmapThread::dumpInternals(fd, args);
@@ -8935,4 +9228,13 @@
}
}
+void AudioFlinger::MmapCaptureThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
+}
+
} // namespace android
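RecordThread, MmapPlaybackThread and MmapCaptureThread now all export their stream flags through toAudioPortConfig() with the same idiom: set AUDIO_PORT_CONFIG_FLAGS in config_mask and fill the matching member of the flags union. A small illustrative helper showing the input-side version (the helper itself is not part of the change):

    #include <system/audio.h>

    // Illustrative: fold non-default input flags into an audio_port_config.
    void applyInputFlags(struct audio_port_config* config, audio_input_flags_t flags) {
        if (flags != AUDIO_INPUT_FLAG_NONE) {
            config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
            config->flags.input = flags;
        }
    }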
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index ea29455..dce3d2e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -281,7 +281,7 @@
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle) = 0;
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle) = 0;
- virtual void getAudioPortConfig(struct audio_port_config *config) = 0;
+ virtual void toAudioPortConfig(struct audio_port_config *config) = 0;
// see note at declaration of mStandby, mOutDevice and mInDevice
@@ -393,6 +393,10 @@
void broadcast_l();
+ virtual bool isTimestampCorrectionEnabled() const { return false; }
+
+ bool isMsdDevice() const { return mIsMsdDevice; }
+
mutable Mutex mLock;
protected:
@@ -434,6 +438,12 @@
virtual void setMasterMono_l(bool mono __unused) { }
virtual bool requireMonoBlend() { return false; }
+ // called within the threadLoop to obtain timestamp from the HAL.
+ virtual status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp __unused) const {
+ return INVALID_OPERATION;
+ }
+
friend class AudioFlinger; // for mEffectChains
const type_t mType;
@@ -493,10 +503,17 @@
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
ExtendedTimestamp mTimestamp;
+ TimestampVerifier< // For timestamp statistics.
+ int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
+ audio_devices_t mTimestampCorrectedDevices = AUDIO_DEVICE_NONE;
+ bool mIsMsdDevice = false;
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it
bool mSignalPending;
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
// ActiveTracks is a sorted vector of track type T representing the
// active tracks of threadLoop() to be considered by the locked prepare portion.
// ActiveTracks should be accessed with the ThreadBase lock held.
@@ -543,6 +560,9 @@
size_t size() const {
return mActiveTracks.size();
}
+ bool isEmpty() const {
+ return mActiveTracks.isEmpty();
+ }
ssize_t indexOf(const sp<T>& item) {
return mActiveTracks.indexOf(item);
}
@@ -646,6 +666,8 @@
virtual ~PlaybackThread();
void dump(int fd, const Vector<String16>& args);
+ // returns a string of audio performance related data in JSON format.
+ virtual Json::Value getJsonDump() const;
// Thread virtuals
virtual bool threadLoop();
@@ -784,7 +806,7 @@
void addPatchTrack(const sp<PatchTrack>& track);
void deletePatchTrack(const sp<PatchTrack>& track);
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
// Return the asynchronous signal wait time.
virtual int64_t computeWaitTimeNs_l() const { return INT64_MAX; }
@@ -801,6 +823,11 @@
&& mTracks.size() < PlaybackThread::kMaxTracks;
}
+ bool isTimestampCorrectionEnabled() const override {
+ const audio_devices_t device =
+ mOutDevice & mTimestampCorrectedDevices;
+ return audio_is_output_devices(device) && popcount(device) > 0;
+ }
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1056,11 +1083,6 @@
sp<NBAIO_Sink> mPipeSink;
// The current sink for the normal mixer to write it's (sub)mix, mOutputSink or mPipeSink
sp<NBAIO_Sink> mNormalSink;
-#ifdef TEE_SINK
- // For dumpsys
- sp<NBAIO_Sink> mTeeSink;
- sp<NBAIO_Source> mTeeSource;
-#endif
uint32_t mScreenState; // cached copy of gScreenState
// TODO: add comment and adjust size as needed
static const size_t kFastMixerLogSize = 8 * 1024;
@@ -1098,6 +1120,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
virtual void dumpInternals(int fd, const Vector<String16>& args);
+ Json::Value getJsonDump() const override;
virtual bool isTrackAllowed_l(
audio_channel_mask_t channelMask, audio_format_t format,
@@ -1154,6 +1177,14 @@
return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
}
+ status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp) const override {
+ if (mNormalSink.get() != nullptr) {
+ return mNormalSink->getTimestamp(*timestamp);
+ }
+ return INVALID_OPERATION;
+ }
+
protected:
virtual void setMasterMono_l(bool mono) {
mMasterMono.store(mono);
@@ -1211,6 +1242,23 @@
virtual bool hasFastMixer() const { return false; }
virtual int64_t computeWaitTimeNs_l() const override;
+
+ status_t threadloop_getHalTimestamp_l(ExtendedTimestamp *timestamp) const override {
+ // For DIRECT and OFFLOAD threads, query the output sink directly.
+ if (mOutput != nullptr) {
+ uint64_t uposition64;
+ struct timespec time;
+ if (mOutput->getPresentationPosition(
+ &uposition64, &time) == OK) {
+ timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ = (int64_t)uposition64;
+ timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+ = audio_utils_ns_from_timespec(&time);
+ return NO_ERROR;
+ }
+ }
+ return INVALID_OPERATION;
+ }
};
class OffloadThread : public DirectOutputThread {
@@ -1318,6 +1366,22 @@
SortedVector < sp<OutputTrack> > mOutputTracks;
public:
virtual bool hasFastMixer() const { return false; }
+ status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp) const override {
+ if (mOutputTracks.size() > 0) {
+ // forward the first OutputTrack's kernel information for timestamp.
+ const ExtendedTimestamp trackTimestamp =
+ mOutputTracks[0]->getClientProxyTimestamp();
+ if (trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0) {
+ timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ trackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ return OK; // discard server timestamp - that's ignored.
+ }
+ }
+ return INVALID_OPERATION;
+ }
};
// record thread
@@ -1376,9 +1440,6 @@
audio_devices_t outDevice,
audio_devices_t inDevice,
bool systemReady
-#ifdef TEE_SINK
- , const sp<NBAIO_Sink>& teeSink
-#endif
);
virtual ~RecordThread();
@@ -1439,8 +1500,8 @@
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
- void addPatchRecord(const sp<PatchRecord>& record);
- void deletePatchRecord(const sp<PatchRecord>& record);
+ void addPatchTrack(const sp<PatchRecord>& record);
+ void deletePatchTrack(const sp<PatchRecord>& record);
void readInputParameters_l();
virtual uint32_t getInputFramesLost();
@@ -1461,7 +1522,7 @@
virtual size_t frameCount() const { return mFrameCount; }
bool hasFastCapture() const { return mFastCapture != 0; }
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
audio_session_t sessionId);
@@ -1481,6 +1542,13 @@
void updateMetadata_l() override;
+ bool fastTrackAvailable() const { return mFastTrackAvail; }
+
+ bool isTimestampCorrectionEnabled() const override {
+ // checks popcount for exactly one device.
+ return audio_is_input_device(
+ mInDevice & mTimestampCorrectedDevices);
+ }
private:
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
@@ -1508,8 +1576,6 @@
int32_t mRsmpInRear; // last filled frame + 1
// For dumpsys
- const sp<NBAIO_Sink> mTeeSink;
-
const sp<MemoryDealer> mReadOnlyHeap;
// one-time initialization, no locks required
@@ -1552,6 +1618,8 @@
bool mFastTrackAvail; // true if fast track available
// common state to all record threads
std::atomic_bool mBtNrecSuspended;
+
+ int64_t mFramesRead = 0; // continuous running counter.
};
class MmapThread : public ThreadBase
@@ -1604,7 +1672,7 @@
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
virtual sp<StreamHalInterface> stream() const { return mHalStream; }
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
@@ -1688,6 +1756,8 @@
void updateMetadata_l() override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
audio_stream_type_t mStreamType;
@@ -1716,6 +1786,8 @@
void processVolume_l() override;
void setRecordSilenced(uid_t uid, bool silenced) override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
AudioStreamIn* mInput;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index ccfb69f..a43cb75 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -91,6 +91,7 @@
void* buffer() const { return mBuffer; }
size_t bufferSize() const { return mBufferSize; }
virtual bool isFastTrack() const = 0;
+ virtual bool isDirect() const = 0;
bool isOutputTrack() const { return (mType == TYPE_OUTPUT); }
bool isPatchTrack() const { return (mType == TYPE_PATCH); }
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -100,6 +101,105 @@
audio_attributes_t attributes() const { return mAttr; }
+#ifdef TEE_SINK
+ void dumpTee(int fd, const std::string &reason) const {
+ mTee.dump(fd, reason);
+ }
+#endif
+
+ /** returns the buffer contents size converted to time in milliseconds
+ * for PCM Playback or Record streaming tracks. The return value is zero for
+ * PCM static tracks and not defined for non-PCM tracks.
+ *
+ * This may be called without the thread lock.
+ */
+ virtual double bufferLatencyMs() const {
+ return mServerProxy->framesReadySafe() * 1000 / sampleRate();
+ }
+
+ /** returns whether the track supports server latency computation.
+ * This is set in the constructor and constant throughout the track lifetime.
+ */
+
+ bool isServerLatencySupported() const { return mServerLatencySupported; }
+
+ /** computes the server latency for PCM Playback or Record track
+ * to the device sink/source. This is the time for the next frame in the track buffer
+ * written or read from the server thread to the device source or sink.
+ *
+ * This may be called without the thread lock, but latencyMs and fromTrack
+ * may not be synchronized. For example PatchPanel may not obtain the
+ * thread lock before calling.
+ *
+ * \param latencyMs on success is set to the latency in milliseconds of the
+ * next frame written/read by the server thread to/from the track buffer
+ * from the device source/sink.
+ * \param fromTrack on success is set to true if latency was computed directly
+ * from the track timestamp; otherwise set to false if latency was
+ * estimated from the server timestamp.
+ * fromTrack may be nullptr or omitted if not required.
+ *
+ * \returns OK or INVALID_OPERATION on failure.
+ */
+ status_t getServerLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+ if (!isServerLatencySupported()) {
+ return INVALID_OPERATION;
+ }
+
+ // if no thread lock is acquired, these atomics are not
+ // synchronized with each other, considered a benign race.
+
+ const double serverLatencyMs = mServerLatencyMs.load();
+ if (serverLatencyMs == 0.) {
+ return INVALID_OPERATION;
+ }
+ if (fromTrack != nullptr) {
+ *fromTrack = mServerLatencyFromTrack.load();
+ }
+ *latencyMs = serverLatencyMs;
+ return OK;
+ }
+
+ /** computes the total client latency for PCM Playback or Record tracks
+ * for the next client app access to the device sink/source; i.e. the
+ * server latency plus the buffer latency.
+ *
+ * This may be called without the thread lock, but latencyMs and fromTrack
+ * may not be synchronized. For example PatchPanel may not obtain the
+ * thread lock before calling.
+ *
+ * \param latencyMs on success is set to the latency in milliseconds of the
+ * next frame written/read by the client app to/from the track buffer
+ * from the device sink/source.
+ * \param fromTrack on success is set to true if latency was computed directly
+ * from the track timestamp; otherwise set to false if latency was
+ * estimated from the server timestamp.
+ * fromTrack may be nullptr or omitted if not required.
+ *
+ * \returns OK or INVALID_OPERATION on failure.
+ */
+ status_t getTrackLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+ double serverLatencyMs;
+ status_t status = getServerLatencyMs(&serverLatencyMs, fromTrack);
+ if (status == OK) {
+ *latencyMs = serverLatencyMs + bufferLatencyMs();
+ }
+ return status;
+ }
+
+ // TODO: Consider making this external.
+ struct FrameTime {
+ int64_t frames;
+ int64_t timeNs;
+ };
+
+ // KernelFrameTime is updated per "mix" period even for non-pcm tracks.
+ void getKernelFrameTime(FrameTime *ft) const {
+ *ft = mKernelFrameTime.load();
+ }
+
+ audio_format_t format() const { return mFormat; }
+
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
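A short usage sketch for the latency accessors added above; the caller, its template parameter and the logging are hypothetical, only getTrackLatencyMs() and its out-parameters come from the change:

    #define LOG_TAG "TrackLatency"
    #include <log/log.h>
    #include <utils/Errors.h>

    // 'track' is any TrackBase-derived object exposing getTrackLatencyMs().
    template <typename TrackType>
    void logTrackLatency(const TrackType& track) {
        double latencyMs;
        bool fromTrack;
        if (track.getTrackLatencyMs(&latencyMs, &fromTrack) == android::OK) {
            // 't': computed from the track timestamp; 'k': estimated from the server/kernel.
            ALOGD("track latency %.2f ms (%c)", latencyMs, fromTrack ? 't' : 'k');
        } else {
            ALOGD("track latency unavailable (unsupported or no timestamp yet)");
        }
    }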
@@ -111,8 +211,6 @@
// but putting it in TrackBase avoids the complexity of virtual inheritance
virtual size_t framesReady() const { return SIZE_MAX; }
- audio_format_t format() const { return mFormat; }
-
uint32_t channelCount() const { return mChannelCount; }
audio_channel_mask_t channelMask() const { return mChannelMask; }
@@ -208,13 +306,19 @@
const bool mIsOut;
sp<ServerProxy> mServerProxy;
const int mId;
- sp<NBAIO_Sink> mTeeSink;
- sp<NBAIO_Source> mTeeSource;
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
bool mTerminated;
track_type mType; // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
audio_io_handle_t mThreadIoHandle; // I/O handle of the thread the track is attached to
audio_port_handle_t mPortId; // unique ID for this track used by audio policy
bool mIsInvalid; // non-resettable latch, set by invalidate()
+
+ bool mServerLatencySupported = false;
+ std::atomic<bool> mServerLatencyFromTrack{}; // latency from track or server timestamp.
+ std::atomic<double> mServerLatencyMs{}; // last latency pushed from server thread.
+ std::atomic<FrameTime> mKernelFrameTime{}; // last frame time on kernel side.
};
// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index a7c4253..78e6c6c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -28,11 +28,11 @@
#include <private/media/AudioTrackShared.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <media/RecordBufferConverter.h>
+#include <mediautils/ServiceUtilities.h>
#include <audio_utils/minifloat.h>
// ----------------------------------------------------------------------------
@@ -102,7 +102,7 @@
mIsInvalid(false)
{
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+ if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
clientUid = callingUid;
@@ -210,22 +210,7 @@
mBufferSize = bufferSize;
#ifdef TEE_SINK
- if (mTeeSinkTrackEnabled) {
- NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
- if (Format_isValid(pipeFormat)) {
- Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {pipeFormat};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSink = pipe;
- mTeeSource = pipeReader;
- }
- }
+ mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
#endif
}
@@ -244,9 +229,6 @@
AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
-#ifdef TEE_SINK
- dumpTee(-1, mTeeSource, mId, 'T');
-#endif
// delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
mServerProxy.clear();
if (mCblk != NULL) {
@@ -274,9 +256,7 @@
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
- if (mTeeSink != 0) {
- (void) mTeeSink->write(buffer->raw, buffer->frameCount);
- }
+ mTee.write(buffer->raw, buffer->frameCount);
#endif
ServerProxy::Buffer buf;
@@ -454,6 +434,14 @@
thread->mFastTrackAvailMask &= ~(1 << i);
}
mName = TRACK_NAME_PENDING;
+
+ mServerLatencySupported = thread->type() == ThreadBase::MIXER
+ || thread->type() == ThreadBase::DUPLICATING;
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId) +
+ + "_PEND_T");
+#endif
}
AudioFlinger::PlaybackThread::Track::~Track()
@@ -498,18 +486,20 @@
wasActive = playbackThread->destroyTrack_l(this);
}
if (isExternalTrack() && !wasActive) {
- AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
+ AudioSystem::releaseOutput(mPortId);
}
}
}
-/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
- result.append("T Name Active Client Session S Flags "
- " Format Chn mask SRate "
- "ST L dB R dB VS dB "
- " Server FrmCnt FrmRdy F Underruns Flushed "
- "Main Buf Aux Buf\n");
+ result.appendFormat("T Name Active Client Session S Flags "
+ " Format Chn mask SRate "
+ "ST Usg CT "
+ " G db L dB R dB VS dB "
+ " Server FrmCnt FrmRdy F Underruns Flushed"
+ "%s\n",
+ isServerLatencySupported() ? " Latency" : "");
}
void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
@@ -518,7 +508,7 @@
switch (mType) {
case TYPE_DEFAULT:
case TYPE_OUTPUT:
- if (mSharedBuffer.get() != nullptr) {
+ if (isStatic()) {
trackType = 'S'; // static
} else {
trackType = ' '; // normal
@@ -594,21 +584,25 @@
? 'e' /* error */ : ' ' /* identical */;
result.appendFormat("%7s %6u %7u %2s 0x%03X "
- "%08X %08X %6u "
- "%2u %5.2g %5.2g %5.2g%c "
- "%08X %6zu%c %6zu %c %9u%c %7u "
- "%08zX %08zX\n",
+ "%08X %08X %6u "
+ "%2u %3x %2x "
+ "%5.2g %5.2g %5.2g %5.2g%c "
+ "%08X %6zu%c %6zu %c %9u%c %7u",
active ? "yes" : "no",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mSessionId,
getTrackStateString(),
mCblk->mFlags,
mFormat,
mChannelMask,
- mAudioTrackServerProxy->getSampleRate(),
+ sampleRate(),
mStreamType,
+ mAttr.usage,
+ mAttr.content_type,
+
+ 20.0 * log10(mFinalVolume),
20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
@@ -621,11 +615,21 @@
fillingStatus,
mAudioTrackServerProxy->getUnderrunFrames(),
nowInUnderrun,
- (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
-
- (size_t)mMainBuffer, // use %zX as %p appends 0x
- (size_t)mAuxBuffer // use %zX as %p appends 0x
+ (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
);
+
+ if (isServerLatencySupported()) {
+ double latencyMs;
+ bool fromTrack;
+ if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+ // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+ // or 'k' if estimated from kernel because track frames haven't been presented yet.
+ result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+ } else {
+ result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+ }
+ }
+ result.append("\n");
}
uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
@@ -682,6 +686,13 @@
mAudioTrackServerProxy->setTimestamp(timestamp);
// We do not set drained here, as FastTrack timestamp may not go to very last frame.
+
+ // Compute latency.
+ // TODO: Consider whether the server latency may be passed in by FastMixer
+ // as a constant for all active FastTracks.
+ const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
+ mServerLatencyFromTrack.store(true);
+ mServerLatencyMs.store(latencyMs);
}
// Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -985,7 +996,7 @@
// Signal thread to fetch new volume.
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
+ Mutex::Autolock _l(thread->mLock);
thread->broadcast_l();
}
}
@@ -1246,7 +1257,17 @@
//To be called with thread lock held
void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
int64_t trackFramesReleased, int64_t sinkFramesWritten,
- const ExtendedTimestamp &timeStamp) {
+ uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
+ // Make the kernel frametime available.
+ const FrameTime ft{
+ timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
+ // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
+ mKernelFrameTime.store(ft);
+ if (!audio_is_linear_pcm(mFormat)) {
+ return;
+ }
+
//update frame map
mFrameMap.push(trackFramesReleased, sinkFramesWritten);
@@ -1255,6 +1276,7 @@
// Our timestamps are only updated when the track is on the Thread active list.
// We need to ensure that tracks are not removed before full drain.
ExtendedTimestamp local = timeStamp;
+ bool drained = true; // default assume drained, if no server info found
bool checked = false;
for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
@@ -1263,18 +1285,25 @@
local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
// check drain state from the latest stage in the pipeline.
if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
- mAudioTrackServerProxy->setDrained(
- local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
+ drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
checked = true;
}
}
}
- if (!checked) { // no server info, assume drained.
- mAudioTrackServerProxy->setDrained(true);
- }
+
+ mAudioTrackServerProxy->setDrained(drained);
// Set correction for flushed frames that are not accounted for in released.
local.mFlushed = mAudioTrackServerProxy->framesFlushed();
mServerProxy->setTimestamp(local);
+
+ // Compute latency info.
+ const bool useTrackTimestamp = !drained;
+ const double latencyMs = useTrackTimestamp
+ ? local.getOutputServerLatencyMs(sampleRate())
+ : timeStamp.getOutputServerLatencyMs(halSampleRate);
+
+ mServerLatencyFromTrack.store(useTrackTimestamp);
+ mServerLatencyMs.store(latencyMs);
}
// ----------------------------------------------------------------------------
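The server thread publishes latency through the std::atomic members (mServerLatencyFromTrack, mServerLatencyMs) so dump and PatchPanel readers can poll without the thread lock; the two loads are not synchronized with each other, which the header comments treat as a benign race. A minimal standalone sketch of that publish/poll pattern (all names illustrative):

    #include <atomic>

    struct LatencyPublisher {
        std::atomic<double> latencyMs{0.};
        std::atomic<bool>   fromTrack{false};

        // Writer side: called from the mixer/server thread each period.
        void publish(double ms, bool usedTrackTimestamp) {
            fromTrack.store(usedTrackTimestamp);
            latencyMs.store(ms);
        }

        // Reader side: may run without any lock; the two values can come from
        // different periods, mirroring the benign race in the real code.
        bool poll(double* ms, bool* usedTrackTimestamp) const {
            const double v = latencyMs.load();
            if (v == 0.) return false;   // nothing published yet
            *usedTrackTimestamp = fromTrack.load();
            *ms = v;
            return true;
        }
    };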
@@ -1342,7 +1371,7 @@
mActive = false;
}
-bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
+ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
Buffer *pInBuffer;
Buffer inBuffer;
@@ -1431,9 +1460,12 @@
mBufferQueue.add(pInBuffer);
ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
+ // audio data is consumed (stored locally); set frameCount to 0.
+ inBuffer.frameCount = 0;
} else {
ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
mThread.unsafe_get(), this);
+ // TODO: return error for this.
}
}
}
@@ -1444,7 +1476,7 @@
stop();
}
- return outputBufferFull;
+ return frames - inBuffer.frameCount; // number of frames consumed.
}
void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
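OutputTrack::write() now reports the number of frames it actually consumed instead of a bool, so a duplicating caller can retry or account for the shortfall. A hedged caller sketch (the loop, the template parameter and the frameSize() accessor are assumptions for illustration, not part of the diff):

    #include <cstdint>
    #include <sys/types.h>

    // Illustrative: push 'frames' frames, advancing by whatever write() consumed.
    template <typename OutputTrackType>
    void writeAllFrames(OutputTrackType& track, void* data, uint32_t frames) {
        uint8_t* p = static_cast<uint8_t*>(data);
        while (frames > 0) {
            const ssize_t consumed = track.write(p, frames);
            if (consumed <= 0) {
                break;   // no overflow buffers left or the track stopped
            }
            p += static_cast<size_t>(consumed) * track.frameSize();
            frames -= static_cast<uint32_t>(consumed);
        }
    }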
@@ -1509,7 +1541,7 @@
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
- AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+ AUDIO_SESSION_NONE, AID_AUDIOSERVER, flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
@@ -1528,7 +1560,7 @@
}
status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
- audio_session_t triggerSession)
+ audio_session_t triggerSession)
{
status_t status = Track::start(event, triggerSession);
if (status != NO_ERROR) {
@@ -1672,18 +1704,20 @@
return;
}
- mRecordBufferConverter = new RecordBufferConverter(
- thread->mChannelMask, thread->mFormat, thread->mSampleRate,
- channelMask, format, sampleRate);
- // Check if the RecordBufferConverter construction was successful.
- // If not, don't continue with construction.
- //
- // NOTE: It would be extremely rare that the record track cannot be created
- // for the current device, but a pending or future device change would make
- // the record track configuration valid.
- if (mRecordBufferConverter->initCheck() != NO_ERROR) {
- ALOGE("RecordTrack unable to create record buffer converter");
- return;
+ if (!isDirect()) {
+ mRecordBufferConverter = new RecordBufferConverter(
+ thread->mChannelMask, thread->mFormat, thread->mSampleRate,
+ channelMask, format, sampleRate);
+ // Check if the RecordBufferConverter construction was successful.
+ // If not, don't continue with construction.
+ //
+ // NOTE: It would be extremely rare that the record track cannot be created
+ // for the current device, but a pending or future device change would make
+ // the record track configuration valid.
+ if (mRecordBufferConverter->initCheck() != NO_ERROR) {
+ ALOGE("RecordTrack unable to create record buffer converter");
+ return;
+ }
}
mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
@@ -1694,7 +1728,15 @@
if (flags & AUDIO_INPUT_FLAG_FAST) {
ALOG_ASSERT(thread->mFastTrackAvail);
thread->mFastTrackAvail = false;
+ } else {
+ // TODO: only Normal Record has timestamps (Fast Record does not).
+ mServerLatencySupported = audio_is_linear_pcm(mFormat);
}
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId)
+ + "_R");
+#endif
}
AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1783,19 +1825,22 @@
}
-/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
+void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt Sil\n");
+ result.appendFormat("Active Client Session S Flags "
+ " Format Chn mask SRate Source "
+ " Server FrmCnt FrmRdy Sil%s\n",
+ isServerLatencySupported() ? " Latency" : "");
}
void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
- "%08X %08X %6u "
- "%08X %6zu %3c\n",
+ "%08X %08X %6u %6X "
+ "%08X %6zu %6zu %3c",
isFastTrack() ? 'F' : ' ',
active ? "yes" : "no",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mSessionId,
getTrackStateString(),
mCblk->mFlags,
@@ -1803,11 +1848,25 @@
mFormat,
mChannelMask,
mSampleRate,
+ mAttr.source,
mCblk->mServer,
mFrameCount,
+ mServerProxy->framesReadySafe(),
isSilenced() ? 's' : 'n'
);
+ if (isServerLatencySupported()) {
+ double latencyMs;
+ bool fromTrack;
+ if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+ // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+ // or 'k' if estimated from kernel (usually for debugging).
+ result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+ } else {
+ result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+ }
+ }
+ result.append("\n");
}
void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
@@ -1837,6 +1896,16 @@
int64_t trackFramesReleased, int64_t sourceFramesRead,
uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
{
+ // Make the kernel frametime available.
+ const FrameTime ft{
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
+ // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
+ mKernelFrameTime.store(ft);
+ if (!audio_is_linear_pcm(mFormat)) {
+ return;
+ }
+
ExtendedTimestamp local = timestamp;
// Convert HAL frames to server-side track frames at track sample rate.
@@ -1850,6 +1919,15 @@
}
}
mServerProxy->setTimestamp(local);
+
+ // Compute latency info.
+ const bool useTrackTimestamp = true; // use track unless debugging.
+ const double latencyMs = - (useTrackTimestamp
+ ? local.getOutputServerLatencyMs(sampleRate())
+ : timestamp.getOutputServerLatencyMs(halSampleRate));
+
+ mServerLatencyFromTrack.store(useTrackTimestamp);
+ mServerLatencyMs.store(latencyMs);
}
status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
@@ -1875,7 +1953,8 @@
: RecordTrack(recordThread, NULL,
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
- buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+ buffer, bufferSize, AUDIO_SESSION_NONE, AID_AUDIOSERVER,
+ flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
@@ -1940,13 +2019,14 @@
audio_format_t format,
audio_channel_mask_t channelMask,
audio_session_t sessionId,
+ bool isOut,
uid_t uid,
pid_t pid,
audio_port_handle_t portId)
: TrackBase(thread, NULL, attr, sampleRate, format,
channelMask, (size_t)0 /* frameCount */,
nullptr /* buffer */, (size_t)0 /* bufferSize */,
- sessionId, uid, false /* isOut */,
+ sessionId, uid, isOut,
ALLOC_NONE,
TYPE_DEFAULT, portId),
mPid(pid), mSilenced(false), mSilencedNotified(false)
@@ -1963,7 +2043,7 @@
}
status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
- audio_session_t triggerSession __unused)
+ audio_session_t triggerSession __unused)
{
return NO_ERROR;
}
@@ -1994,19 +2074,27 @@
{
}
-/*static*/ void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
+void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
{
- result.append("Client Session Format Chn mask SRate\n");
+ result.appendFormat("Client Session Format Chn mask SRate Flags %s\n",
+ isOut() ? "Usg CT": "Source");
}
void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
{
- result.appendFormat("%6u %7u %08X %08X %6u\n",
+ result.appendFormat("%6u %7u %08X %08X %6u 0x%03X ",
mPid,
mSessionId,
mFormat,
mChannelMask,
- mSampleRate);
+ mSampleRate,
+ mAttr.flags);
+ if (isOut()) {
+ result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
+ } else {
+ result.appendFormat("%6x", mAttr.source);
+ }
+ result.append("\n");
}
} // namespace android
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index d29cae1..b75e957 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -13,7 +13,6 @@
$(call include-path-for, audio-utils) \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_SHARED_LIBRARIES := \
libcutils \
@@ -22,10 +21,10 @@
libbinder \
libaudioclient \
libhardware_legacy \
- libserviceutility \
libaudiopolicymanager \
libmedia_helper \
libmediametrics \
+ libmediautils \
libeffectsconfig
LOCAL_STATIC_LIBRARIES := \
@@ -74,7 +73,6 @@
LOCAL_C_INCLUDES += \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4812b1f..d4c49d9 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -124,17 +124,11 @@
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId) = 0;
// indicates to the audio policy manager that the output starts being used by corresponding stream.
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
+ virtual status_t startOutput(audio_port_handle_t portId) = 0;
// indicates to the audio policy manager that the output stops being used by corresponding stream.
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
+ virtual status_t stopOutput(audio_port_handle_t portId) = 0;
// releases the output.
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
+ virtual void releaseOutput(audio_port_handle_t portId) = 0;
// request an input appropriate for record from the supplied device with supplied parameters.
virtual status_t getInputForAttr(const audio_attributes_t *attr,
@@ -147,16 +141,13 @@
input_type_t *inputType,
audio_port_handle_t *portId) = 0;
// indicates to the audio policy manager that the input starts being used.
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session,
+ virtual status_t startInput(audio_port_handle_t portId,
bool silenced,
concurrency_type__mask_t *concurrency) = 0;
// indicates to the audio policy manager that the input stops being used.
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session) = 0;
+ virtual status_t stopInput(audio_port_handle_t portId) = 0;
// releases the input.
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session) = 0;
+ virtual void releaseInput(audio_port_handle_t portId) = 0;
//
// volume control functions
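The interface now keys start/stop/release on the audio_port_handle_t returned by getOutputForAttr()/getInputForAttr() rather than on (io handle, stream, session) tuples. A hedged caller sketch of the new playback lifecycle (the policy pointer, error handling and the assumption that the android namespace and the usual headers are in scope are all illustrative):

    // Illustrative only: exercise the reworked portId-based calls.
    status_t runOutputLifecycle(AudioPolicyInterface* policy, audio_port_handle_t portId) {
        const status_t status = policy->startOutput(portId);  // was (output, stream, session)
        if (status != NO_ERROR) {
            return status;
        }
        // ... playback happens here ...
        policy->stopOutput(portId);
        policy->releaseOutput(portId);
        return NO_ERROR;
    }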
@@ -235,9 +226,9 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle,
+ audio_port_handle_t *portId,
uid_t uid) = 0;
- virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+ virtual status_t stopAudioSource(audio_port_handle_t portId) = 0;
virtual status_t setMasterMono(bool mono) = 0;
virtual status_t getMasterMono(bool *mono) = 0;
@@ -324,11 +315,6 @@
// function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
- virtual status_t stopTone() = 0;
-
// set down link audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index e69e687..9b8f095 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -18,10 +18,10 @@
src/EffectDescriptor.cpp \
src/SoundTriggerSession.cpp \
src/SessionRoute.cpp \
- src/AudioSourceDescriptor.cpp \
src/VolumeCurve.cpp \
src/TypeConverter.cpp \
- src/AudioSession.cpp
+ src/AudioSession.cpp \
+ src/ClientDescriptor.cpp
LOCAL_SHARED_LIBRARIES := \
libcutils \
@@ -35,8 +35,7 @@
$(LOCAL_PATH)/include \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy \
- frameworks/av/services/audiopolicy/utilities \
- system/media/audio_utils/include \
+ $(call include-path-for, audio-utils) \
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
similarity index 69%
rename from services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index e0037fc..555412e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -19,26 +19,19 @@
namespace android {
/**
- * Interface for input descriptors to implement so dependent audio sessions can query information
- * about their context
+ * Interface for I/O descriptors to implement so information about their context
+ * can be queried and updated.
*/
-class AudioSessionInfoProvider
+class AudioIODescriptorInterface
{
public:
- virtual ~AudioSessionInfoProvider() {};
+ virtual ~AudioIODescriptorInterface() {};
virtual audio_config_base_t getConfig() const = 0;
virtual audio_patch_handle_t getPatchHandle() const = 0;
-};
-
-class AudioSessionInfoUpdateListener
-{
-public:
- virtual ~AudioSessionInfoUpdateListener() {};
-
- virtual void onSessionInfoUpdate() const = 0;;
+ virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b25d6d4..44662e5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -16,9 +16,10 @@
#pragma once
+#include "AudioIODescriptorInterface.h"
#include "AudioPort.h"
#include "AudioSession.h"
-#include "AudioSessionInfoProvider.h"
+#include "ClientDescriptor.h"
#include <utils/Errors.h>
#include <system/audio.h>
#include <utils/SortedVector.h>
@@ -31,7 +32,7 @@
// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
// and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
+class AudioInputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
{
public:
explicit AudioInputDescriptor(const sp<IOProfile>& profile,
@@ -66,12 +67,13 @@
AudioSessionCollection getAudioSessions(bool activeOnly) const;
size_t getAudioSessionCount(bool activeOnly) const;
audio_source_t getHighestPrioritySource(bool activeOnly) const;
+ void changeRefCount(audio_session_t session, int delta);
- // implementation of AudioSessionInfoProvider
- virtual audio_config_base_t getConfig() const;
- virtual audio_patch_handle_t getPatchHandle() const;
- void setPatchHandle(audio_patch_handle_t handle);
+ // implementation of AudioIODescriptorInterface
+ audio_config_base_t getConfig() const override;
+ audio_patch_handle_t getPatchHandle() const override;
+ void setPatchHandle(audio_patch_handle_t handle) override;
status_t open(const audio_config_t *config,
audio_devices_t device,
@@ -80,14 +82,20 @@
audio_input_flags_t flags,
audio_io_handle_t *input);
// Called when a stream is about to be started.
- // Note: called after AudioSession::changeActiveCount(1)
+ // Note: called after changeRefCount(session, 1)
status_t start();
// Called after a stream is stopped
- // Note: called after AudioSession::changeActiveCount(-1)
+ // Note: called after changeRefCount(session, -1)
void stop();
void close();
-private:
+ RecordClientMap& clients() { return mClients; }
+ RecordClientVector getClientsForSession(audio_session_t session);
+
+ private:
+
+ void updateSessionRecordingConfiguration(int event, const sp<AudioSession>& audioSession);
+
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
// audio sessions attached to this input
@@ -100,6 +108,9 @@
// We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
SortedVector<audio_session_t> mPreemptedSessions;
AudioPolicyClientInterface *mClientInterface;
+ uint32_t mGlobalRefCount; // non-session-specific ref count
+
+ RecordClientMap mClients;
};
class AudioInputCollection :
@@ -123,6 +134,8 @@
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+ sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
+
status_t dump(int fd) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 5e5d38b..ff0201a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -17,14 +17,14 @@
#pragma once
#include <sys/types.h>
-
-#include "AudioPort.h"
-#include <RoutingStrategy.h>
#include <utils/Errors.h>
#include <utils/Timers.h>
#include <utils/KeyedVector.h>
#include <system/audio.h>
-#include "AudioSourceDescriptor.h"
+#include <RoutingStrategy.h>
+#include "AudioIODescriptorInterface.h"
+#include "AudioPort.h"
+#include "ClientDescriptor.h"
namespace android {
@@ -35,7 +35,7 @@
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
-class AudioOutputDescriptor: public AudioPortConfig
+class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
{
public:
AudioOutputDescriptor(const sp<AudioPort>& port,
@@ -73,10 +73,14 @@
audio_module_handle_t getModuleHandle() const;
- audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
- void setPatchHandle(audio_patch_handle_t handle) { mPatchHandle = handle; };
+ // implementation of AudioIODescriptorInterface
+ audio_config_base_t getConfig() const override;
+ audio_patch_handle_t getPatchHandle() const override;
+ void setPatchHandle(audio_patch_handle_t handle) override;
- sp<AudioPort> mPort;
+ TrackClientMap& clients() { return mClients; }
+
+ sp<AudioPort> mPort;
audio_devices_t mDevice; // current device this output is routed to
uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
nsecs_t mStopTime[AUDIO_STREAM_CNT];
@@ -89,6 +93,7 @@
protected:
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
+ TrackClientMap mClients;
};
// Audio output driven by a software mixer in audio flinger.
@@ -153,7 +158,7 @@
class HwAudioOutputDescriptor: public AudioOutputDescriptor
{
public:
- HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+ HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
AudioPolicyClientInterface *clientInterface);
virtual ~HwAudioOutputDescriptor() {}
@@ -170,7 +175,7 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port *port) const;
- const sp<AudioSourceDescriptor> mSource;
+ const sp<SourceClientDescriptor> mSource;
};
@@ -224,6 +229,8 @@
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+ sp<SwAudioOutputDescriptor> getOutputForClient(audio_port_handle_t portId);
+
status_t dump(int fd) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 43f6ed6..f861b95 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -38,16 +38,24 @@
AudioPolicyConfig(HwModuleCollection &hwModules,
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
- sp<DeviceDescriptor> &defaultOutputDevices,
+ sp<DeviceDescriptor> &defaultOutputDevice,
VolumeCurvesCollection *volumes = nullptr)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
- mDefaultOutputDevices(defaultOutputDevices),
+ mDefaultOutputDevice(defaultOutputDevice),
mVolumeCurves(volumes),
mIsSpeakerDrcEnabled(false)
{}
+ const std::string& getSource() const {
+ return mSource;
+ }
+
+ void setSource(const std::string& file) {
+ mSource = file;
+ }
+
void setVolumes(const VolumeCurvesCollection &volumes)
{
if (mVolumeCurves != nullptr) {
@@ -100,46 +108,52 @@
void setDefaultOutputDevice(const sp<DeviceDescriptor> &defaultDevice)
{
- mDefaultOutputDevices = defaultDevice;
+ mDefaultOutputDevice = defaultDevice;
}
- const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevices; }
+ const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevice; }
void setDefault(void)
{
- mDefaultOutputDevices = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
- sp<HwModule> module;
+ mSource = "AudioPolicyConfig::setDefault";
+ mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+ mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
- mAvailableOutputDevices.add(mDefaultOutputDevices);
+ defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+ sp<AudioProfile> micProfile = new AudioProfile(
+ AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000);
+ defaultInputDevice->addAudioProfile(micProfile);
+ mAvailableOutputDevices.add(mDefaultOutputDevice);
mAvailableInputDevices.add(defaultInputDevice);
- module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+ sp<HwModule> module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY, 2 /*halVersionMajor*/);
+ mHwModules.add(module);
+ mDefaultOutputDevice->attach(module);
+ defaultInputDevice->attach(module);
sp<OutputProfile> outProfile;
outProfile = new OutputProfile(String8("primary"));
outProfile->attach(module);
outProfile->addAudioProfile(
new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
- outProfile->addSupportedDevice(mDefaultOutputDevices);
+ outProfile->addSupportedDevice(mDefaultOutputDevice);
outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
module->addOutputProfile(outProfile);
sp<InputProfile> inProfile;
inProfile = new InputProfile(String8("primary"));
inProfile->attach(module);
- inProfile->addAudioProfile(
- new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000));
+ inProfile->addAudioProfile(micProfile);
inProfile->addSupportedDevice(defaultInputDevice);
module->addInputProfile(inProfile);
-
- mHwModules.add(module);
}
private:
+ std::string mSource;
HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
- sp<DeviceDescriptor> &mDefaultOutputDevices;
+ sp<DeviceDescriptor> &mDefaultOutputDevice;
VolumeCurvesCollection *mVolumeCurves;
// TODO: remove when legacy conf file is removed. true on devices that use DRC on the
// DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 09a86dd..bd7517f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -153,9 +153,6 @@
class AudioPortConfig : public virtual RefBase
{
public:
- AudioPortConfig();
- virtual ~AudioPortConfig() {}
-
status_t applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -165,10 +162,11 @@
return (other != 0) &&
(other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
}
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- struct audio_gain_config mGain;
+ unsigned int mSamplingRate = 0u;
+ audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ struct audio_gain_config mGain = { .index = -1 };
+ union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 8741c66..a1ee708 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -16,91 +16,84 @@
#pragma once
-#include "policy.h"
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
+#include <vector>
+
#include <system/audio.h>
-#include <cutils/config_utils.h>
+#include <utils/RefBase.h>
+#include <utils/SortedVector.h>
+#include <utils/String8.h>
+
+#include "policy.h"
namespace android {
typedef SortedVector<uint32_t> SampleRateVector;
-typedef SortedVector<audio_channel_mask_t> ChannelsVector;
typedef Vector<audio_format_t> FormatVector;
template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right);
+bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+ if (left.size() != right.size()) {
+ return false;
+ }
+ for (size_t index = 0; index < right.size(); index++) {
+ if (left[index] != right[index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <typename T>
+bool operator!= (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+ return !(left == right);
+}
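The element-wise operator== and operator!= above are now defined inline for any SortedVector element type, so collections such as the sample-rate and channel-mask vectors can be compared directly. A small usage sketch (the helper function is illustrative):

    #include <utils/SortedVector.h>

    // Illustrative: compare two sample-rate collections element by element.
    bool sampleRatesMatch(const android::SortedVector<uint32_t>& a,
                          const android::SortedVector<uint32_t>& b) {
        return a == b;   // uses the template operator== defined above
    }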
+
+class ChannelsVector : public SortedVector<audio_channel_mask_t>
+{
+public:
+ ChannelsVector() = default;
+ ChannelsVector(const ChannelsVector&) = default;
+ ChannelsVector(const SortedVector<audio_channel_mask_t>& sv) :
+ SortedVector<audio_channel_mask_t>(sv) {}
+ ChannelsVector& operator=(const ChannelsVector&) = default;
+
+ // Applies audio_channel_mask_out_to_in to all elements and returns the result.
+ ChannelsVector asInMask() const;
+ // Applies audio_channel_mask_in_to_out to all elements and returns the result.
+ ChannelsVector asOutMask() const;
+};
class AudioProfile : public virtual RefBase
{
public:
- AudioProfile(audio_format_t format,
- audio_channel_mask_t channelMasks,
- uint32_t samplingRate) :
- mName(String8("")),
- mFormat(format)
- {
- mChannelMasks.add(channelMasks);
- mSamplingRates.add(samplingRate);
- }
+ static sp<AudioProfile> createFullDynamic();
+ AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
AudioProfile(audio_format_t format,
const ChannelsVector &channelMasks,
- const SampleRateVector &samplingRateCollection) :
- mName(String8("")),
- mFormat(format),
- mChannelMasks(channelMasks),
- mSamplingRates(samplingRateCollection)
- {}
+ const SampleRateVector &samplingRateCollection);
audio_format_t getFormat() const { return mFormat; }
-
- void setChannels(const ChannelsVector &channelMasks)
- {
- if (mIsDynamicChannels) {
- mChannelMasks = channelMasks;
- }
- }
const ChannelsVector &getChannels() const { return mChannelMasks; }
-
- void setSampleRates(const SampleRateVector &sampleRates)
- {
- if (mIsDynamicRate) {
- mSamplingRates = sampleRates;
- }
- }
const SampleRateVector &getSampleRates() const { return mSamplingRates; }
+ void setChannels(const ChannelsVector &channelMasks);
+ void setSampleRates(const SampleRateVector &sampleRates);
+ void clear();
bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
-
- void clear()
- {
- if (mIsDynamicChannels) {
- mChannelMasks.clear();
- }
- if (mIsDynamicRate) {
- mSamplingRates.clear();
- }
- }
-
- inline bool supportsChannels(audio_channel_mask_t channels) const
+ bool supportsChannels(audio_channel_mask_t channels) const
{
return mChannelMasks.indexOf(channels) >= 0;
}
- inline bool supportsRate(uint32_t rate) const
- {
- return mSamplingRates.indexOf(rate) >= 0;
- }
+ bool supportsRate(uint32_t rate) const { return mSamplingRates.indexOf(rate) >= 0; }
status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
-
status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
audio_channel_mask_t &updatedChannelMask,
audio_port_type_t portType,
audio_port_role_t portRole) const;
-
status_t checkCompatibleSamplingRate(uint32_t samplingRate,
uint32_t &updatedSamplingRate) const;
@@ -136,213 +129,48 @@
class AudioProfileVector : public Vector<sp<AudioProfile> >
{
public:
- ssize_t add(const sp<AudioProfile> &profile)
- {
- ssize_t index = Vector::add(profile);
- // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
- // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
- // [](const audio_format_t *format1, const audio_format_t *format2) {
- // return compareFormats(*format1, *format2);
- // }
- sort(compareFormats);
- return index;
- }
-
+ ssize_t add(const sp<AudioProfile> &profile);
// This API is intended to be used by the policy manager once retrieving capabilities
// for a profile with dynamic format, rate and channels attributes
- ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd)
- {
- // Check valid profile to add:
- if (!profileToAdd->hasValidFormat()) {
- return -1;
- }
- if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
- FormatVector formats;
- formats.add(profileToAdd->getFormat());
- setFormats(FormatVector(formats));
- return 0;
- }
- if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
- setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
- return 0;
- }
- if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
- setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
- return 0;
- }
- // Go through the list of profile to avoid duplicates
- for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
- const sp<AudioProfile> &profile = itemAt(profileIndex);
- if (profile->isValid() && profile == profileToAdd) {
- // Nothing to do
- return profileIndex;
- }
- }
- profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
- return add(profileToAdd);
- }
-
- sp<AudioProfile> getFirstValidProfile() const
- {
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->isValid()) {
- return itemAt(i);
- }
- }
- return 0;
- }
-
- bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+ ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd);
status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
audio_format_t format) const;
-
status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
audio_format_t &format,
audio_port_type_t portType,
audio_port_role_t portRole) const;
+ void clearProfiles();
+ // Assuming that this profile vector contains input profiles,
+ // find the best matching config from 'outputProfiles', according to
+ // the given preferences for audio formats and channel masks.
+ // Note: std::vectors are used because the specialized containers for formats
+ // and channels are sorted and impose their own ordering, whereas the
+ // preference order passed here must be preserved.
+ status_t findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+ const std::vector<audio_format_t>& preferredFormats, // order: most pref -> least pref
+ const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+ bool preferHigherSamplingRates,
+ audio_config_base *bestOutputConfig) const;
- FormatVector getSupportedFormats() const
- {
- FormatVector supportedFormats;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->hasValidFormat()) {
- supportedFormats.add(itemAt(i)->getFormat());
- }
- }
- return supportedFormats;
- }
+ sp<AudioProfile> getFirstValidProfile() const;
+ sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
+ bool hasValidProfile() const { return getFirstValidProfile() != 0; }
- bool hasDynamicProfile() const
- {
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->isDynamic()) {
- return true;
- }
- }
- return false;
- }
-
- bool hasDynamicFormat() const
- {
- return getProfileFor(gDynamicFormat) != 0;
- }
-
- bool hasDynamicChannelsFor(audio_format_t format) const
- {
- for (size_t i = 0; i < size(); i++) {
- sp<AudioProfile> profile = itemAt(i);
- if (profile->getFormat() == format && profile->isDynamicChannels()) {
- return true;
- }
- }
- return false;
- }
-
- bool hasDynamicRateFor(audio_format_t format) const
- {
- for (size_t i = 0; i < size(); i++) {
- sp<AudioProfile> profile = itemAt(i);
- if (profile->getFormat() == format && profile->isDynamicRate()) {
- return true;
- }
- }
- return false;
- }
+ FormatVector getSupportedFormats() const;
+ bool hasDynamicChannelsFor(audio_format_t format) const;
+ bool hasDynamicFormat() const { return getProfileFor(gDynamicFormat) != 0; }
+ bool hasDynamicProfile() const;
+ bool hasDynamicRateFor(audio_format_t format) const;
// One audio profile will be added for each format supported by Audio HAL
- void setFormats(const FormatVector &formats)
- {
- // Only allow to change the format of dynamic profile
- sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
- if (dynamicFormatProfile == 0) {
- return;
- }
- for (size_t i = 0; i < formats.size(); i++) {
- sp<AudioProfile> profile = new AudioProfile(formats[i],
- dynamicFormatProfile->getChannels(),
- dynamicFormatProfile->getSampleRates());
- profile->setDynamicFormat(true);
- profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
- profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
- add(profile);
- }
- }
+ void setFormats(const FormatVector &formats);
- void clearProfiles()
- {
- for (size_t i = size(); i != 0; ) {
- sp<AudioProfile> profile = itemAt(--i);
- if (profile->isDynamicFormat() && profile->hasValidFormat()) {
- removeAt(i);
- continue;
- }
- profile->clear();
- }
- }
-
- void dump(int fd, int spaces) const
- {
- const size_t SIZE = 256;
- char buffer[SIZE];
-
- snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < size(); i++) {
- snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
- write(fd, buffer, strlen(buffer));
- itemAt(i)->dump(fd, spaces + 8);
- }
- }
+ void dump(int fd, int spaces) const;
private:
- void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format)
- {
- for (size_t i = 0; i < size(); i++) {
- sp<AudioProfile> profile = itemAt(i);
- if (profile->getFormat() == format && profile->isDynamicRate()) {
- if (profile->hasValidRates()) {
- // Need to create a new profile with same format
- sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
- sampleRates);
- profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
- add(profileToAdd);
- } else {
- profile->setSampleRates(sampleRates);
- }
- return;
- }
- }
- }
-
- void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
- {
- for (size_t i = 0; i < size(); i++) {
- sp<AudioProfile> profile = itemAt(i);
- if (profile->getFormat() == format && profile->isDynamicChannels()) {
- if (profile->hasValidChannels()) {
- // Need to create a new profile with same format
- sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
- profile->getSampleRates());
- profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
- add(profileToAdd);
- } else {
- profile->setChannels(channelMasks);
- }
- return;
- }
- }
- }
-
- sp<AudioProfile> getProfileFor(audio_format_t format) const
- {
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getFormat() == format) {
- return itemAt(i);
- }
- }
- return 0;
- }
+ sp<AudioProfile> getProfileFor(audio_format_t format) const;
+ void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format);
+ void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format);
static int compareFormats(const sp<AudioProfile> *profile1, const sp<AudioProfile> *profile2);
};
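As a rough usage sketch (not part of this change; the profile vectors, preference lists, and logged values are hypothetical), a caller holding the input capabilities could negotiate an output config via the declaration above like this:

    // Sketch only: 'inputProfiles' and 'outputProfiles' are hypothetical AudioProfileVector instances.
    audio_config_base_t best = AUDIO_CONFIG_BASE_INITIALIZER;
    const std::vector<audio_format_t> preferredFormats = {
            AUDIO_FORMAT_PCM_FLOAT, AUDIO_FORMAT_PCM_16_BIT };      // most preferred first
    const std::vector<audio_channel_mask_t> preferredChannels = {
            AUDIO_CHANNEL_OUT_STEREO, AUDIO_CHANNEL_OUT_MONO };
    if (inputProfiles.findBestMatchingOutputConfig(outputProfiles, preferredFormats,
            preferredChannels, true /*preferHigherSamplingRates*/, &best) == NO_ERROR) {
        ALOGV("negotiated format %#x, rate %u, channel mask %#x",
                best.format, best.sample_rate, best.channel_mask);
    }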
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index dd5247d..1636d3a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -23,13 +23,13 @@
#include <utils/KeyedVector.h>
#include <media/AudioPolicy.h>
#include <media/IAudioPolicyServiceClient.h>
-#include "AudioSessionInfoProvider.h"
+#include "AudioIODescriptorInterface.h"
namespace android {
class AudioPolicyClientInterface;
-class AudioSession : public RefBase, public AudioSessionInfoUpdateListener
+class AudioSession : public RefBase
{
public:
AudioSession(audio_session_t session,
@@ -39,9 +39,7 @@
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
uid_t uid,
- bool isSoundTrigger,
- AudioMix* policyMix,
- AudioPolicyClientInterface *clientInterface);
+ bool isSoundTrigger);
status_t dump(int fd, int spaces, int index) const;
@@ -50,6 +48,8 @@
audio_format_t format() const { return mConfig.format; }
uint32_t sampleRate() const { return mConfig.sample_rate; }
audio_channel_mask_t channelMask() const { return mConfig.channel_mask; }
+ audio_config_base config() const { return mConfig; }
+ record_client_info_t recordClientInfo() const { return mRecordClientInfo; }
audio_input_flags_t flags() const { return mFlags; }
uid_t uid() const { return mRecordClientInfo.uid; }
void setUid(uid_t uid) { mRecordClientInfo.uid = uid; }
@@ -63,10 +63,6 @@
uint32_t changeOpenCount(int delta);
uint32_t changeActiveCount(int delta);
- void setInfoProvider(AudioSessionInfoProvider *provider);
- // implementation of AudioSessionInfoUpdateListener
- virtual void onSessionInfoUpdate() const;
-
private:
record_client_info_t mRecordClientInfo;
const struct audio_config_base mConfig;
@@ -75,19 +71,14 @@
bool mSilenced;
uint32_t mOpenCount;
uint32_t mActiveCount;
- AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
- AudioPolicyClientInterface* mClientInterface;
- const AudioSessionInfoProvider* mInfoProvider;
};
class AudioSessionCollection :
- public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
- public AudioSessionInfoUpdateListener
+ public DefaultKeyedVector<audio_session_t, sp<AudioSession> >
{
public:
status_t addSession(audio_session_t session,
- const sp<AudioSession>& audioSession,
- AudioSessionInfoProvider *provider);
+ const sp<AudioSession>& audioSession);
status_t removeSession(audio_session_t session);
@@ -99,9 +90,6 @@
bool isSourceActive(audio_source_t source) const;
audio_source_t getHighestPrioritySource(bool activeOnly) const;
- // implementation of AudioSessionInfoUpdateListener
- virtual void onSessionInfoUpdate() const;
-
status_t dump(int fd, int spaces) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
deleted file mode 100644
index 0d90f42..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-#include <RoutingStrategy.h>
-#include <AudioPatch.h>
-
-namespace android {
-
-class SwAudioOutputDescriptor;
-class HwAudioOutputDescriptor;
-class DeviceDescriptor;
-
-class AudioSourceDescriptor: public RefBase
-{
-public:
- AudioSourceDescriptor(const sp<DeviceDescriptor> device, const audio_attributes_t *attributes,
- uid_t uid) :
- mDevice(device), mAttributes(*attributes), mUid(uid) {}
- virtual ~AudioSourceDescriptor() {}
-
- audio_patch_handle_t getHandle() const { return mPatchDesc->mHandle; }
-
- status_t dump(int fd);
-
- const sp<DeviceDescriptor> mDevice;
- const audio_attributes_t mAttributes;
- uid_t mUid;
- sp<AudioPatch> mPatchDesc;
- wp<SwAudioOutputDescriptor> mSwOutput;
- wp<HwAudioOutputDescriptor> mHwOutput;
-};
-
-class AudioSourceCollection :
- public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
-{
-public:
- status_t dump(int fd) const;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
new file mode 100644
index 0000000..9efe57f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <map>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include "AudioPatch.h"
+
+namespace android {
+
+class DeviceDescriptor;
+class HwAudioOutputDescriptor;
+class SwAudioOutputDescriptor;
+
+class ClientDescriptor: public RefBase
+{
+public:
+ ClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+ audio_attributes_t attributes, audio_config_base_t config,
+ audio_port_handle_t preferredDeviceId) :
+ mPortId(portId), mUid(uid), mSessionId(sessionId), mAttributes(attributes),
+ mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false) {}
+ ~ClientDescriptor() override = default;
+
+ status_t dump(int fd, int spaces, int index);
+ virtual status_t dump(String8& dst, int spaces, int index);
+
+ audio_port_handle_t portId() const { return mPortId; }
+ uid_t uid() const { return mUid; }
+ audio_session_t session() const { return mSessionId; };
+ audio_attributes_t attributes() const { return mAttributes; }
+ audio_config_base_t config() const { return mConfig; }
+ audio_port_handle_t preferredDeviceId() const { return mPreferredDeviceId; };
+ void setActive(bool active) { mActive = active; }
+ bool active() const { return mActive; }
+
+private:
+ const audio_port_handle_t mPortId; // unique Id for this client
+ const uid_t mUid; // client UID
+ const audio_session_t mSessionId; // audio session ID
+ const audio_attributes_t mAttributes; // usage...
+ const audio_config_base_t mConfig;
+ const audio_port_handle_t mPreferredDeviceId; // selected input device port ID
+ bool mActive;
+
+protected:
+ // FIXME: use until other descriptor classes have a dump to String8 method
+ int mDumpFd;
+};
+
+class TrackClientDescriptor: public ClientDescriptor
+{
+public:
+ TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+ audio_attributes_t attributes, audio_config_base_t config,
+ audio_port_handle_t preferredDeviceId,
+ audio_stream_type_t stream, audio_output_flags_t flags) :
+ ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+ mStream(stream), mFlags(flags) {}
+ ~TrackClientDescriptor() override = default;
+
+ using ClientDescriptor::dump;
+ status_t dump(String8& dst, int spaces, int index) override;
+
+ audio_output_flags_t flags() const { return mFlags; }
+ audio_stream_type_t stream() const { return mStream; }
+
+private:
+ const audio_stream_type_t mStream;
+ const audio_output_flags_t mFlags;
+};
+
+class RecordClientDescriptor: public ClientDescriptor
+{
+public:
+ RecordClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
+ audio_attributes_t attributes, audio_config_base_t config,
+ audio_port_handle_t preferredDeviceId,
+ audio_source_t source, audio_input_flags_t flags) :
+ ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
+ mSource(source), mFlags(flags) {}
+ ~RecordClientDescriptor() override = default;
+
+ using ClientDescriptor::dump;
+ status_t dump(String8& dst, int spaces, int index) override;
+
+ audio_source_t source() const { return mSource; }
+ audio_input_flags_t flags() const { return mFlags; }
+
+private:
+ const audio_source_t mSource;
+ const audio_input_flags_t mFlags;
+};
+
+class SourceClientDescriptor: public TrackClientDescriptor
+{
+public:
+ SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
+ const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
+ audio_stream_type_t stream);
+ ~SourceClientDescriptor() override = default;
+
+ sp<AudioPatch> patchDesc() const { return mPatchDesc; }
+ sp<DeviceDescriptor> srcDevice() const { return mSrcDevice; };
+ wp<SwAudioOutputDescriptor> swOutput() const { return mSwOutput; }
+ void setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput);
+ wp<HwAudioOutputDescriptor> hwOutput() const { return mHwOutput; }
+ void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
+
+ using ClientDescriptor::dump;
+ status_t dump(String8& dst, int spaces, int index) override;
+
+ private:
+ const sp<AudioPatch> mPatchDesc;
+ const sp<DeviceDescriptor> mSrcDevice;
+ wp<SwAudioOutputDescriptor> mSwOutput;
+ wp<HwAudioOutputDescriptor> mHwOutput;
+};
+
+class SourceClientCollection :
+ public DefaultKeyedVector< audio_port_handle_t, sp<SourceClientDescriptor> >
+{
+public:
+ status_t dump(int fd) const;
+};
+
+typedef std::vector< sp<TrackClientDescriptor> > TrackClientVector;
+typedef std::map< audio_port_handle_t, sp<TrackClientDescriptor> > TrackClientMap;
+typedef std::vector< sp<RecordClientDescriptor> > RecordClientVector;
+typedef std::map< audio_port_handle_t, sp<RecordClientDescriptor> > RecordClientMap;
+
+} // namespace android
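As a rough usage sketch (not part of this change; the port ID, UID, and config values are hypothetical), the new descriptor classes above could be created and tracked as follows:

    // Sketch only: IDs and config values below are hypothetical.
    audio_attributes_t attr = {};                                   // default usage/content type
    audio_config_base_t cfg = AUDIO_CONFIG_BASE_INITIALIZER;
    cfg.sample_rate = 48000;
    cfg.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    cfg.format = AUDIO_FORMAT_PCM_16_BIT;
    sp<TrackClientDescriptor> track = new TrackClientDescriptor(
            1 /*portId*/, 10000 /*uid*/, AUDIO_SESSION_NONE, attr, cfg,
            AUDIO_PORT_HANDLE_NONE, AUDIO_STREAM_MUSIC, AUDIO_OUTPUT_FLAG_NONE);
    TrackClientMap tracks;                                          // keyed by port ID
    tracks.emplace(track->portId(), track);
    track->setActive(true);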
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 92a4c3e..c08e752 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -76,13 +76,14 @@
audio_devices_t types() const { return mDeviceTypes; }
- sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8& address) const;
- DeviceVector getDevicesFromType(audio_devices_t types) const;
+ // If 'address' is empty, a device with a non-empty address may be returned
+ // if there is no device with the specified 'type' and empty address.
+ sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address) const;
+ DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
- DeviceVector getDevicesFromTypeAddr(audio_devices_t type, const String8& address) const;
-
- audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+ DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+ audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
status_t dump(int fd, const String8 &tag, int spaces = 0, bool verbose = true) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index cb9f49e..05cfc31 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -107,7 +107,7 @@
sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t device,
const char *device_address,
const char *device_name,
- bool matchAdress = true) const;
+ bool matchAddress = true) const;
status_t dump(int fd) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 92332fb..2770e74 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -32,7 +32,7 @@
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
- mClientInterface(clientInterface)
+ mClientInterface(clientInterface), mGlobalRefCount(0)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -164,7 +164,7 @@
status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
const sp<AudioSession>& audioSession) {
- return mSessions.addSession(session, audioSession, /*AudioSessionInfoProvider*/this);
+ return mSessions.addSession(session, audioSession);
}
status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
@@ -179,7 +179,11 @@
void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
{
mPatchHandle = handle;
- mSessions.onSessionInfoUpdate();
+ for (size_t i = 0; i < mSessions.size(); i++) {
+ if (mSessions[i]->activeCount() > 0) {
+ updateSessionRecordingConfiguration(RECORD_CONFIG_EVENT_START, mSessions[i]);
+ }
+ }
}
audio_config_base_t AudioInputDescriptor::getConfig() const
@@ -266,7 +270,7 @@
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
// do not call stop() here as stop() is supposed to be called after
- // AudioSession::changeActiveCount(-1) and we don't know how many sessions
+ // changeRefCount(session, -1) and we don't know how many sessions
// are still active at this time
if (isActive()) {
mProfile->curActiveCount--;
@@ -276,6 +280,78 @@
}
}
+void AudioInputDescriptor::changeRefCount(audio_session_t session, int delta)
+{
+ sp<AudioSession> audioSession = mSessions.valueFor(session);
+ if (audioSession == 0) {
+ return;
+ }
+ // handle session-independent ref count
+ uint32_t oldGlobalRefCount = mGlobalRefCount;
+ if ((delta + (int)mGlobalRefCount) < 0) {
+ ALOGW("changeRefCount() invalid delta %d globalRefCount %d", delta, mGlobalRefCount);
+ delta = -((int)mGlobalRefCount);
+ }
+ mGlobalRefCount += delta;
+ if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
+ if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+ {
+ mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ MIX_STATE_MIXING);
+ }
+
+ } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
+ if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+ {
+ mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ MIX_STATE_IDLE);
+ }
+ }
+
+ uint32_t oldActiveCount = audioSession->activeCount();
+ if ((delta + (int)oldActiveCount) < 0) {
+ ALOGW("changeRefCount() invalid delta %d for sesion %d active count %d",
+ delta, session, oldActiveCount);
+ delta = -((int)oldActiveCount);
+ }
+
+ audioSession->changeActiveCount(delta);
+
+ int event = RECORD_CONFIG_EVENT_NONE;
+ if ((oldActiveCount == 0) && (audioSession->activeCount() > 0)) {
+ event = RECORD_CONFIG_EVENT_START;
+ } else if ((oldActiveCount > 0) && (audioSession->activeCount() == 0)) {
+ event = RECORD_CONFIG_EVENT_STOP;
+ }
+ if (event != RECORD_CONFIG_EVENT_NONE) {
+ updateSessionRecordingConfiguration(event, audioSession);
+ }
+
+}
+
+void AudioInputDescriptor::updateSessionRecordingConfiguration(
+ int event, const sp<AudioSession>& audioSession) {
+
+ const audio_config_base_t sessionConfig = audioSession->config();
+ const record_client_info_t recordClientInfo = audioSession->recordClientInfo();
+ const audio_config_base_t config = getConfig();
+ mClientInterface->onRecordingConfigurationUpdate(event,
+ &recordClientInfo, &sessionConfig,
+ &config, mPatchHandle);
+}
+
+RecordClientVector AudioInputDescriptor::getClientsForSession(
+ audio_session_t session)
+{
+ RecordClientVector clients;
+ for (const auto &client : mClients) {
+ if (client.second->session() == session) {
+ clients.push_back(client.second);
+ }
+ }
+ return clients;
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
@@ -297,6 +373,13 @@
mSessions.dump(fd, 1);
+ size_t index = 0;
+ result = " AudioRecord clients:\n";
+ for (const auto& client: mClients) {
+ client.second->dump(result, 2, index++);
+ }
+ result.append(" \n");
+ write(fd, result.string(), result.size());
return NO_ERROR;
}
@@ -359,6 +442,19 @@
return devices;
}
+sp<AudioInputDescriptor> AudioInputCollection::getInputForClient(audio_port_handle_t portId)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioInputDescriptor> inputDesc = valueAt(i);
+ for (const auto& client : inputDesc->clients()) {
+ if (client.second->portId() == portId) {
+ return inputDesc;
+ }
+ }
+ }
+ return 0;
+}
+
status_t AudioInputCollection::dump(int fd) const
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 294a2a6..39fce4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -55,11 +55,28 @@
}
}
+audio_config_base_t AudioOutputDescriptor::getConfig() const
+{
+ const audio_config_base_t config = { .sample_rate = mSamplingRate, .channel_mask = mChannelMask,
+ .format = mFormat };
+ return config;
+}
+
audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
{
return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
}
+audio_patch_handle_t AudioOutputDescriptor::getPatchHandle() const
+{
+ return mPatchHandle;
+}
+
+void AudioOutputDescriptor::setPatchHandle(audio_patch_handle_t handle)
+{
+ mPatchHandle = handle;
+}
+
audio_port_handle_t AudioOutputDescriptor::getId() const
{
return mId;
@@ -207,6 +224,13 @@
i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
result.append(buffer);
}
+
+ result.append(" AudioTrack clients:\n");
+ size_t index = 0;
+ for (const auto& client : mClients) {
+ client.second->dump(result, 2, index++);
+ }
+ result.append(" \n");
write(fd, result.string(), result.size());
return NO_ERROR;
@@ -534,9 +558,9 @@
}
// HwAudioOutputDescriptor implementation
-HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
- : AudioOutputDescriptor(source->mDevice, clientInterface),
+ : AudioOutputDescriptor(source->srcDevice(), clientInterface),
mSource(source)
{
}
@@ -552,7 +576,7 @@
snprintf(buffer, SIZE, "Source:\n");
result.append(buffer);
write(fd, result.string(), result.size());
- mSource->dump(fd);
+ mSource->dump(fd, 0, 0);
return NO_ERROR;
}
@@ -566,13 +590,13 @@
struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig) const
{
- mSource->mDevice->toAudioPortConfig(dstConfig, srcConfig);
+ mSource->srcDevice()->toAudioPortConfig(dstConfig, srcConfig);
}
void HwAudioOutputDescriptor::toAudioPort(
struct audio_port *port) const
{
- mSource->mDevice->toAudioPort(port);
+ mSource->srcDevice()->toAudioPort(port);
}
@@ -714,6 +738,18 @@
return devices;
}
+sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+ for (const auto& client : outputDesc->clients()) {
+ if (client.second->portId() == portId) {
+ return outputDesc;
+ }
+ }
+ }
+ return 0;
+}
status_t SwAudioOutputCollection::dump(int fd) const
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index a9fe48d..e78e121 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -34,51 +34,32 @@
{
}
+static String8 dumpPatchEndpoints(
+ int spaces, const char *prefix, int count, const audio_port_config *cfgs)
+{
+ String8 result;
+ for (int i = 0; i < count; ++i) {
+ const audio_port_config &cfg = cfgs[i];
+ result.appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
+ if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
+ std::string device;
+ deviceToString(cfg.ext.device.type, device);
+ result.appendFormat("Device ID %d %s", cfg.id, device.c_str());
+ } else {
+ result.appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+ }
+ result.append("\n");
+ }
+ return result;
+}
+
status_t AudioPatch::dump(int fd, int spaces, int index) const
{
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
-
- snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
- result.append(buffer);
- for (size_t i = 0; i < mPatch.num_sources; i++) {
- if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(mPatch.sources[i].ext.device.type, device);
- snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
- mPatch.sources[i].id,
- device.c_str());
- } else {
- snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
- mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
- }
- result.append(buffer);
- }
- snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
- result.append(buffer);
- for (size_t i = 0; i < mPatch.num_sinks; i++) {
- if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(mPatch.sinks[i].ext.device.type, device);
- snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
- mPatch.sinks[i].id,
- device.c_str());
- } else {
- snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
- mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
- }
- result.append(buffer);
- }
-
+ result.appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
+ spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+ result.append(dumpPatchEndpoints(spaces, "src ", mPatch.num_sources, mPatch.sources));
+ result.append(dumpPatchEndpoints(spaces, "sink", mPatch.num_sinks, mPatch.sinks));
write(fd, result.string(), result.size());
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index d85562e..3fe37ab 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -386,22 +386,12 @@
// --- AudioPortConfig class implementation
-AudioPortConfig::AudioPortConfig()
-{
- mSamplingRate = 0;
- mChannelMask = AUDIO_CHANNEL_NONE;
- mFormat = AUDIO_FORMAT_INVALID;
- memset(&mGain, 0, sizeof(struct audio_gain_config));
- mGain.index = -1;
-}
-
status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig)
{
- struct audio_port_config localBackupConfig;
+ struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
status_t status = NO_ERROR;
- localBackupConfig.config_mask = config->config_mask;
toAudioPortConfig(&localBackupConfig);
sp<AudioPort> audioport = getAudioPort();
@@ -425,6 +415,9 @@
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
mGain = config->gain;
}
+ if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ mFlags = config->flags;
+ }
exit:
if (status != NO_ERROR) {
@@ -436,33 +429,38 @@
return status;
}
+namespace {
+
+template<typename T>
+void updateField(
+ const T& portConfigField, T audio_port_config::*port_config_field,
+ struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+ unsigned int configMask, T defaultValue)
+{
+ if (dstConfig->config_mask & configMask) {
+ if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+ dstConfig->*port_config_field = srcConfig->*port_config_field;
+ } else {
+ dstConfig->*port_config_field = portConfigField;
+ }
+ } else {
+ dstConfig->*port_config_field = defaultValue;
+ }
+}
+
+} // namespace
+
void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig) const
{
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- dstConfig->sample_rate = mSamplingRate;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
- dstConfig->sample_rate = srcConfig->sample_rate;
- }
- } else {
- dstConfig->sample_rate = 0;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- dstConfig->channel_mask = mChannelMask;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
- dstConfig->channel_mask = srcConfig->channel_mask;
- }
- } else {
- dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- dstConfig->format = mFormat;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
- dstConfig->format = srcConfig->format;
- }
- } else {
- dstConfig->format = AUDIO_FORMAT_INVALID;
- }
+ updateField(mSamplingRate, &audio_port_config::sample_rate,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+ updateField(mChannelMask, &audio_port_config::channel_mask,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+ (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+ updateField(mFormat, &audio_port_config::format,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+
sp<AudioPort> audioport = getAudioPort();
if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
dstConfig->gain = mGain;
@@ -478,6 +476,9 @@
} else {
dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
}
+
+ updateField(mFlags, &audio_port_config::flags,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
}
} // namespace android
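For readers unfamiliar with the pointer-to-member idiom used by updateField() above, a self-contained miniature with a hypothetical struct (not part of this change) behaves the same way:

    // Sketch only: 'Cfg' and 'copyIfSet' are hypothetical stand-ins for audio_port_config/updateField.
    #include <cstdio>

    struct Cfg { unsigned mask; int rate; int format; };

    template <typename T>
    void copyIfSet(const T& fallback, T Cfg::*field, Cfg* dst, const Cfg* src,
                   unsigned bit, T def) {
        if (dst->mask & bit) {
            // field requested: prefer the source value if it also carries that field
            dst->*field = (src != nullptr && (src->mask & bit)) ? src->*field : fallback;
        } else {
            dst->*field = def;                                    // field not requested: reset to default
        }
    }

    int main() {
        Cfg dst{0x1, 0, 0};
        copyIfSet(44100, &Cfg::rate, &dst, nullptr, 0x1, 0);      // bit set, no source: use fallback
        std::printf("rate=%d\n", dst.rate);                       // prints 44100
        return 0;
    }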
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index fd6fc1c..d04beec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -14,20 +14,112 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <set>
+#include <string>
+
#define LOG_TAG "APM::AudioProfile"
//#define LOG_NDEBUG 0
-#include "AudioProfile.h"
-#include "AudioPort.h"
-#include "HwModule.h"
-#include "AudioGain.h"
-#include <utils/SortedVector.h>
-#include "TypeConverter.h"
#include <media/AudioResamplerPublic.h>
-#include <algorithm>
+#include <utils/Errors.h>
+
+#include "AudioGain.h"
+#include "AudioPort.h"
+#include "AudioProfile.h"
+#include "HwModule.h"
+#include "TypeConverter.h"
namespace android {
+ChannelsVector ChannelsVector::asInMask() const
+{
+ ChannelsVector inMaskVector;
+ for (const auto& channel : *this) {
+ if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
+ inMaskVector.add(audio_channel_mask_out_to_in(channel));
+ }
+ }
+ return inMaskVector;
+}
+
+ChannelsVector ChannelsVector::asOutMask() const
+{
+ ChannelsVector outMaskVector;
+ for (const auto& channel : *this) {
+ if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
+ outMaskVector.add(audio_channel_mask_in_to_out(channel));
+ }
+ }
+ return outMaskVector;
+}
+
+bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
+{
+ return (left.getFormat() == compareTo.getFormat()) &&
+ (left.getChannels() == compareTo.getChannels()) &&
+ (left.getSampleRates() == compareTo.getSampleRates());
+}
+
+static AudioProfile* createFullDynamicImpl()
+{
+ AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
+ ChannelsVector(), SampleRateVector());
+ dynamicProfile->setDynamicFormat(true);
+ dynamicProfile->setDynamicChannels(true);
+ dynamicProfile->setDynamicRate(true);
+ return dynamicProfile;
+}
+
+// static
+sp<AudioProfile> AudioProfile::createFullDynamic()
+{
+ static sp<AudioProfile> dynamicProfile = createFullDynamicImpl();
+ return dynamicProfile;
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+ audio_channel_mask_t channelMasks,
+ uint32_t samplingRate) :
+ mName(String8("")),
+ mFormat(format)
+{
+ mChannelMasks.add(channelMasks);
+ mSamplingRates.add(samplingRate);
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+ const ChannelsVector &channelMasks,
+ const SampleRateVector &samplingRateCollection) :
+ mName(String8("")),
+ mFormat(format),
+ mChannelMasks(channelMasks),
+ mSamplingRates(samplingRateCollection) {}
+
+void AudioProfile::setChannels(const ChannelsVector &channelMasks)
+{
+ if (mIsDynamicChannels) {
+ mChannelMasks = channelMasks;
+ }
+}
+
+void AudioProfile::setSampleRates(const SampleRateVector &sampleRates)
+{
+ if (mIsDynamicRate) {
+ mSamplingRates = sampleRates;
+ }
+}
+
+void AudioProfile::clear()
+{
+ if (mIsDynamicChannels) {
+ mChannelMasks.clear();
+ }
+ if (mIsDynamicRate) {
+ mSamplingRates.clear();
+ }
+}
+
status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
audio_format_t format) const
{
@@ -39,27 +131,6 @@
return BAD_VALUE;
}
-template <typename T>
-bool operator == (const SortedVector<T> &left, const SortedVector<T> &right)
-{
- if (left.size() != right.size()) {
- return false;
- }
- for(size_t index = 0; index < right.size(); index++) {
- if (left[index] != right[index]) {
- return false;
- }
- }
- return true;
-}
-
-bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
-{
- return (left.getFormat() == compareTo.getFormat()) &&
- (left.getChannels() == compareTo.getChannels()) &&
- (left.getSampleRates() == compareTo.getSampleRates());
-}
-
status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
uint32_t &updatedSamplingRate) const
{
@@ -225,6 +296,50 @@
write(fd, result.string(), result.size());
}
+ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
+{
+ ssize_t index = Vector::add(profile);
+ // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+ // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
+ // [](const audio_format_t *format1, const audio_format_t *format2) {
+ // return compareFormats(*format1, *format2);
+ // }
+ sort(compareFormats);
+ return index;
+}
+
+ssize_t AudioProfileVector::addProfileFromHal(const sp<AudioProfile> &profileToAdd)
+{
+ // Check valid profile to add:
+ if (!profileToAdd->hasValidFormat()) {
+ return -1;
+ }
+ if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+ FormatVector formats;
+ formats.add(profileToAdd->getFormat());
+ setFormats(FormatVector(formats));
+ return 0;
+ }
+ if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+ setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
+ return 0;
+ }
+ if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+ setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
+ return 0;
+ }
+ // Go through the list of profiles to avoid duplicates
+ for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
+ const sp<AudioProfile> &profile = itemAt(profileIndex);
+ if (profile->isValid() && profile == profileToAdd) {
+ // Nothing to do
+ return profileIndex;
+ }
+ }
+ profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+ return add(profileToAdd);
+}
+
status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
audio_channel_mask_t channelMask,
audio_format_t format) const
@@ -281,6 +396,233 @@
return BAD_VALUE;
}
+void AudioProfileVector::clearProfiles()
+{
+ for (size_t i = size(); i != 0; ) {
+ sp<AudioProfile> profile = itemAt(--i);
+ if (profile->isDynamicFormat() && profile->hasValidFormat()) {
+ removeAt(i);
+ continue;
+ }
+ profile->clear();
+ }
+}
+
+// Returns an intersection between two possibly unsorted vectors and the contents of 'order'.
+// The result is ordered according to 'order'.
+template<typename T, typename Order>
+std::vector<typename T::value_type> intersectFilterAndOrder(
+ const T& input1, const T& input2, const Order& order)
+{
+ std::set<typename T::value_type> set1{input1.begin(), input1.end()};
+ std::set<typename T::value_type> set2{input2.begin(), input2.end()};
+ std::set<typename T::value_type> common;
+ std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+ std::inserter(common, common.begin()));
+ std::vector<typename T::value_type> result;
+ for (const auto& e : order) {
+ if (common.find(e) != common.end()) result.push_back(e);
+ }
+ return result;
+}
+
+// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
+// 'comp' is a comparator function.
+template<typename T, typename Compare>
+std::vector<typename T::value_type> intersectAndOrder(
+ const T& input1, const T& input2, Compare comp)
+{
+ std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
+ std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
+ std::vector<typename T::value_type> result;
+ std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+ std::back_inserter(result), comp);
+ return result;
+}
+
+status_t AudioProfileVector::findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
+ const std::vector<audio_format_t>& preferredFormats,
+ const std::vector<audio_channel_mask_t>& preferredOutputChannels,
+ bool preferHigherSamplingRates,
+ audio_config_base *bestOutputConfig) const
+{
+ auto formats = intersectFilterAndOrder(getSupportedFormats(),
+ outputProfiles.getSupportedFormats(), preferredFormats);
+ // Pick the best compatible profile.
+ for (const auto& f : formats) {
+ sp<AudioProfile> inputProfile = getFirstValidProfileFor(f);
+ sp<AudioProfile> outputProfile = outputProfiles.getFirstValidProfileFor(f);
+ if (inputProfile == nullptr || outputProfile == nullptr) {
+ continue;
+ }
+ auto channels = intersectFilterAndOrder(inputProfile->getChannels().asOutMask(),
+ outputProfile->getChannels(), preferredOutputChannels);
+ if (channels.empty()) {
+ continue;
+ }
+ auto sampleRates = preferHigherSamplingRates ?
+ intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+ std::greater<typename SampleRateVector::value_type>()) :
+ intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+ std::less<typename SampleRateVector::value_type>());
+ if (sampleRates.empty()) {
+ continue;
+ }
+ ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
+ __func__, *channels.begin(), *sampleRates.begin(), f);
+ bestOutputConfig->format = f;
+ bestOutputConfig->sample_rate = *sampleRates.begin();
+ bestOutputConfig->channel_mask = *channels.begin();
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
+{
+ for (size_t i = 0; i < size(); i++) {
+ if (itemAt(i)->isValid()) {
+ return itemAt(i);
+ }
+ }
+ return 0;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
+{
+ for (size_t i = 0; i < size(); i++) {
+ if (itemAt(i)->isValid() && itemAt(i)->getFormat() == format) {
+ return itemAt(i);
+ }
+ }
+ return 0;
+}
+
+FormatVector AudioProfileVector::getSupportedFormats() const
+{
+ FormatVector supportedFormats;
+ for (size_t i = 0; i < size(); i++) {
+ if (itemAt(i)->hasValidFormat()) {
+ supportedFormats.add(itemAt(i)->getFormat());
+ }
+ }
+ return supportedFormats;
+}
+
+bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioProfile> profile = itemAt(i);
+ if (profile->getFormat() == format && profile->isDynamicChannels()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AudioProfileVector::hasDynamicProfile() const
+{
+ for (size_t i = 0; i < size(); i++) {
+ if (itemAt(i)->isDynamic()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioProfile> profile = itemAt(i);
+ if (profile->getFormat() == format && profile->isDynamicRate()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void AudioProfileVector::setFormats(const FormatVector &formats)
+{
+ // Only allow changing the format of a dynamic profile
+ sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
+ if (dynamicFormatProfile == 0) {
+ return;
+ }
+ for (size_t i = 0; i < formats.size(); i++) {
+ sp<AudioProfile> profile = new AudioProfile(formats[i],
+ dynamicFormatProfile->getChannels(),
+ dynamicFormatProfile->getSampleRates());
+ profile->setDynamicFormat(true);
+ profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+ profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+ add(profile);
+ }
+}
+
+void AudioProfileVector::dump(int fd, int spaces) const
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+
+ snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < size(); i++) {
+ snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
+ write(fd, buffer, strlen(buffer));
+ itemAt(i)->dump(fd, spaces + 8);
+ }
+}
+
+sp<AudioProfile> AudioProfileVector::getProfileFor(audio_format_t format) const
+{
+ for (size_t i = 0; i < size(); i++) {
+ if (itemAt(i)->getFormat() == format) {
+ return itemAt(i);
+ }
+ }
+ return 0;
+}
+
+void AudioProfileVector::setSampleRatesFor(
+ const SampleRateVector &sampleRates, audio_format_t format)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioProfile> profile = itemAt(i);
+ if (profile->getFormat() == format && profile->isDynamicRate()) {
+ if (profile->hasValidRates()) {
+ // Need to create a new profile with same format
+ sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
+ sampleRates);
+ profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+ add(profileToAdd);
+ } else {
+ profile->setSampleRates(sampleRates);
+ }
+ return;
+ }
+ }
+}
+
+void AudioProfileVector::setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioProfile> profile = itemAt(i);
+ if (profile->getFormat() == format && profile->isDynamicChannels()) {
+ if (profile->hasValidChannels()) {
+ // Need to create a new profile with same format
+ sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
+ profile->getSampleRates());
+ profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+ add(profileToAdd);
+ } else {
+ profile->setChannels(channelMasks);
+ }
+ return;
+ }
+ }
+}
+
+// static
int AudioProfileVector::compareFormats(const sp<AudioProfile> *profile1,
const sp<AudioProfile> *profile2)
{
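To make the ordering contract of intersectFilterAndOrder() above concrete, a standalone sketch with plain integers (not part of this change) yields the common elements in the order given by 'order':

    // Sketch only: integers stand in for audio formats/channel masks.
    #include <algorithm>
    #include <cstdio>
    #include <iterator>
    #include <set>
    #include <vector>

    int main() {
        std::vector<int> a{5, 1, 3}, b{3, 2, 5}, order{3, 4, 5, 6};
        std::set<int> s1(a.begin(), a.end()), s2(b.begin(), b.end()), common;
        std::set_intersection(s1.begin(), s1.end(), s2.begin(), s2.end(),
                              std::inserter(common, common.begin()));
        std::vector<int> result;
        for (int e : order) {
            if (common.count(e) != 0) result.push_back(e);        // keep 'order' ordering
        }
        for (int e : result) std::printf("%d ", e);               // prints "3 5"
        return 0;
    }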
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 7cda46b..5ea4c92 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -35,14 +35,11 @@
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
uid_t uid,
- bool isSoundTrigger,
- AudioMix* policyMix,
- AudioPolicyClientInterface *clientInterface) :
+ bool isSoundTrigger) :
mRecordClientInfo({ .uid = uid, .session = session, .source = inputSource}),
mConfig({ .format = format, .sample_rate = sampleRate, .channel_mask = channelMask}),
mFlags(flags), mIsSoundTrigger(isSoundTrigger),
- mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface),
- mInfoProvider(NULL)
+ mOpenCount(1), mActiveCount(0)
{
}
@@ -60,7 +57,6 @@
uint32_t AudioSession::changeActiveCount(int delta)
{
- const uint32_t oldActiveCount = mActiveCount;
if ((delta + (int)mActiveCount) < 0) {
ALOGW("%s invalid delta %d, active count %d",
__FUNCTION__, delta, mActiveCount);
@@ -68,34 +64,6 @@
}
mActiveCount += delta;
ALOGV("%s active count %d", __FUNCTION__, mActiveCount);
- int event = RECORD_CONFIG_EVENT_NONE;
-
- if ((oldActiveCount == 0) && (mActiveCount > 0)) {
- event = RECORD_CONFIG_EVENT_START;
- } else if ((oldActiveCount > 0) && (mActiveCount == 0)) {
- event = RECORD_CONFIG_EVENT_STOP;
- }
-
- if (event != RECORD_CONFIG_EVENT_NONE) {
- // Dynamic policy callback:
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
- (event == RECORD_CONFIG_EVENT_START) ? MIX_STATE_MIXING : MIX_STATE_IDLE);
- }
-
- // Recording configuration callback:
- const AudioSessionInfoProvider* provider = mInfoProvider;
- const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
- AUDIO_CONFIG_BASE_INITIALIZER;
- const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
- AUDIO_PATCH_HANDLE_NONE;
- if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- mClientInterface->onRecordingConfigurationUpdate(event, &mRecordClientInfo,
- &mConfig, &deviceConfig, patchHandle);
- }
- }
return mActiveCount;
}
@@ -114,27 +82,6 @@
return false;
}
-void AudioSession::setInfoProvider(AudioSessionInfoProvider *provider)
-{
- mInfoProvider = provider;
-}
-
-void AudioSession::onSessionInfoUpdate() const
-{
- if (mActiveCount > 0) {
- // resend the callback after requerying the informations from the info provider
- const AudioSessionInfoProvider* provider = mInfoProvider;
- const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
- AUDIO_CONFIG_BASE_INITIALIZER;
- const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
- AUDIO_PATCH_HANDLE_NONE;
- if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
- &mRecordClientInfo, &mConfig, &deviceConfig, patchHandle);
- }
- }
-}
-
status_t AudioSession::dump(int fd, int spaces, int index) const
{
const size_t SIZE = 256;
@@ -169,8 +116,7 @@
}
status_t AudioSessionCollection::addSession(audio_session_t session,
- const sp<AudioSession>& audioSession,
- AudioSessionInfoProvider *provider)
+ const sp<AudioSession>& audioSession)
{
ssize_t index = indexOfKey(session);
@@ -178,7 +124,6 @@
ALOGW("addSession() session %d already in", session);
return ALREADY_EXISTS;
}
- audioSession->setInfoProvider(provider);
add(session, audioSession);
ALOGV("addSession() session %d client %d source %d",
session, audioSession->uid(), audioSession->inputSource());
@@ -194,7 +139,6 @@
return ALREADY_EXISTS;
}
ALOGV("removeSession() session %d", session);
- valueAt(index)->setInfoProvider(NULL);
removeItemsAt(index);
return NO_ERROR;
}
@@ -271,13 +215,6 @@
return source;
}
-void AudioSessionCollection::onSessionInfoUpdate() const
-{
- for (size_t i = 0; i < size(); i++) {
- valueAt(i)->onSessionInfoUpdate();
- }
-}
-
status_t AudioSessionCollection::dump(int fd, int spaces) const
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
deleted file mode 100644
index ba33e57..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioSourceDescriptor"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/String8.h>
-#include <media/AudioPolicyHelper.h>
-#include <HwModule.h>
-#include <AudioGain.h>
-#include <AudioSourceDescriptor.h>
-#include <DeviceDescriptor.h>
-#include <IOProfile.h>
-#include <AudioOutputDescriptor.h>
-
-namespace android {
-
-status_t AudioSourceDescriptor::dump(int fd)
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- snprintf(buffer, SIZE, "mStream: %d\n", audio_attributes_to_stream_type(&mAttributes));
- result.append(buffer);
- snprintf(buffer, SIZE, "mDevice:\n");
- result.append(buffer);
- write(fd, result.string(), result.size());
- mDevice->dump(fd, 2 , 0);
- return NO_ERROR;
-}
-
-
-status_t AudioSourceCollection::dump(int fd) const
-{
- const size_t SIZE = 256;
- char buffer[SIZE];
-
- snprintf(buffer, SIZE, "\nAudio sources dump:\n");
- write(fd, buffer, strlen(buffer));
- for (size_t i = 0; i < size(); i++) {
- snprintf(buffer, SIZE, "- Source %d dump:\n", keyAt(i));
- write(fd, buffer, strlen(buffer));
- valueAt(i)->dump(fd);
- }
-
- return NO_ERROR;
-}
-
-}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
new file mode 100644
index 0000000..5aca3cc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM_ClientDescriptor"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "AudioGain.h"
+#include "AudioOutputDescriptor.h"
+#include "AudioPatch.h"
+#include "ClientDescriptor.h"
+#include "DeviceDescriptor.h"
+#include "HwModule.h"
+#include "IOProfile.h"
+
+namespace android {
+
+status_t ClientDescriptor::dump(int fd, int spaces, int index)
+{
+ String8 out;
+
+ // FIXME: use until other descriptor classes have a dump to String8 method
+ mDumpFd = fd;
+
+ status_t status = dump(out, spaces, index);
+ if (status == NO_ERROR) {
+ write(fd, out.string(), out.size());
+ }
+
+ return status;
+}
+
+status_t ClientDescriptor::dump(String8& out, int spaces, int index)
+{
+ out.appendFormat("%*sClient %d:\n", spaces, "", index+1);
+ out.appendFormat("%*s- Port ID: %d Session Id: %d UID: %d\n", spaces, "",
+ mPortId, mSessionId, mUid);
+ out.appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
+ mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+ out.appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
+ out.appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+ return NO_ERROR;
+}
+
+status_t TrackClientDescriptor::dump(String8& out, int spaces, int index)
+{
+ ClientDescriptor::dump(out, spaces, index);
+
+ out.appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+
+ return NO_ERROR;
+}
+
+status_t RecordClientDescriptor::dump(String8& out, int spaces, int index)
+{
+ ClientDescriptor::dump(out, spaces, index);
+
+ out.appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+
+ return NO_ERROR;
+}
+
+SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
+ audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
+ const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream) :
+ TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
+ AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE, stream, AUDIO_OUTPUT_FLAG_NONE),
+ mPatchDesc(patchDesc), mSrcDevice(srcDevice)
+{
+}
+
+void SourceClientDescriptor::setSwOutput(const sp<SwAudioOutputDescriptor>& swOutput)
+{
+ mSwOutput = swOutput;
+}
+
+void SourceClientDescriptor::setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput)
+{
+ mHwOutput = hwOutput;
+}
+
+status_t SourceClientDescriptor::dump(String8& out, int spaces, int index)
+{
+ TrackClientDescriptor::dump(out, spaces, index);
+
+ if (mDumpFd >= 0) {
+ out.appendFormat("%*s- Device:\n", spaces, "");
+ write(mDumpFd, out.string(), out.size());
+
+ mSrcDevice->dump(mDumpFd, 2, 0);
+ mDumpFd = -1;
+ }
+
+ return NO_ERROR;
+}
+
+status_t SourceClientCollection::dump(int fd) const
+{
+ String8 out;
+ out.append("\nAudio sources:\n");
+ write(fd, out.string(), out.size());
+ for (size_t i = 0; i < size(); i++) {
+ valueAt(i)->dump(fd, 2, i);
+ }
+
+ return NO_ERROR;
+}
+
+}; //namespace android
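A minimal standalone sketch of the dump pattern introduced above, with std::string standing in for android::String8 and made-up field values (both are assumptions for illustration only): the base class appends its common fields to the string, and each subclass calls the base dump before appending its own fields.

#include <cstdio>
#include <string>

struct ClientInfo {
    int portId = 1;
    int sessionId = 42;
    virtual ~ClientInfo() = default;

    // Base class appends the fields shared by all clients.
    virtual void dump(std::string& out, int spaces, int index) const {
        char buf[128];
        snprintf(buf, sizeof(buf), "%*sClient %d:\n", spaces, "", index + 1);
        out += buf;
        snprintf(buf, sizeof(buf), "%*s- Port ID: %d Session Id: %d\n", spaces, "", portId, sessionId);
        out += buf;
    }
};

struct TrackClientInfo : ClientInfo {
    int stream = 3;

    void dump(std::string& out, int spaces, int index) const override {
        ClientInfo::dump(out, spaces, index);  // shared header first
        char buf[64];
        snprintf(buf, sizeof(buf), "%*s- Stream: %d\n", spaces, "", stream);
        out += buf;                            // then subclass-specific fields
    }
};

int main() {
    std::string out;
    TrackClientInfo().dump(out, 2, 0);
    fputs(out.c_str(), stdout);  // the real code write()s the accumulated String8 to an fd
    return 0;
}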
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index 1e105f5..19eac26 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -412,6 +412,7 @@
free(data);
ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+ config.setSource(path);
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 19c2062..1638645 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -121,17 +121,28 @@
return ret;
}
-audio_devices_t DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
+DeviceVector DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
{
- audio_devices_t devices = AUDIO_DEVICE_NONE;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getModuleHandle() == moduleHandle) {
- devices |= itemAt(i)->type();
+ DeviceVector devices;
+ for (const auto& device : *this) {
+ if (device->getModuleHandle() == moduleHandle) {
+ devices.add(device);
}
}
return devices;
}
+audio_devices_t DeviceVector::getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const
+{
+ audio_devices_t deviceTypes = AUDIO_DEVICE_NONE;
+ for (const auto& device : *this) {
+ if (device->getModuleHandle() == moduleHandle) {
+ deviceTypes |= device->type();
+ }
+ }
+ return deviceTypes;
+}
+
sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
{
sp<DeviceDescriptor> device;
@@ -145,8 +156,8 @@
}
}
}
- ALOGV("DeviceVector::getDevice() for type %08x address %s found %p",
- type, address.string(), device.get());
+ ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
+ __func__, type, address.string(), device.get());
return device;
}
@@ -160,7 +171,7 @@
return nullptr;
}
-DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
+DeviceVector DeviceVector::getDevicesFromTypeMask(audio_devices_t type) const
{
DeviceVector devices;
bool isOutput = audio_is_output_devices(type);
@@ -171,20 +182,8 @@
if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
devices.add(itemAt(i));
type &= ~curType;
- ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
- itemAt(i)->type(), itemAt(i).get());
- }
- }
- return devices;
-}
-
-DeviceVector DeviceVector::getDevicesFromTypeAddr(
- audio_devices_t type, const String8& address) const
-{
- DeviceVector devices;
- for (const auto& device : *this) {
- if (device->type() == type && device->mAddress == address) {
- devices.add(device);
+ ALOGV("DeviceVector::%s() for type %08x found %p",
+ __func__, itemAt(i)->type(), itemAt(i).get());
}
}
return devices;
@@ -253,7 +252,7 @@
void DeviceDescriptor::toAudioPort(struct audio_port *port) const
{
- ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
+ ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
AudioPort::toAudioPort(port);
port->id = mId;
toAudioPortConfig(&port->active_config);
@@ -305,7 +304,7 @@
{
std::string device;
deviceToString(mDeviceType, device);
- ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId, mDeviceType, device.c_str(),
+ ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType, device.c_str(),
mAddress.string());
AudioPort::log(" ");
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index aef7dbe..dcc0ec8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -278,9 +278,10 @@
sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
const char *device_address,
const char *device_name,
- bool matchAdress) const
+ bool matchAddress) const
{
- String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
+ String8 address = (device_address == nullptr || !matchAddress) ?
+ String8("") : String8(device_address);
// handle legacy remote submix case where the address was not always specified
if (device_distinguishes_on_address(device) && (address.length() == 0)) {
address = String8("0");
@@ -288,15 +289,9 @@
for (const auto& hwModule : *this) {
DeviceVector declaredDevices = hwModule->getDeclaredDevices();
- DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
- if (!deviceList.isEmpty()) {
- return deviceList.itemAt(0);
- }
- if (!matchAdress) {
- deviceList = declaredDevices.getDevicesFromType(device);
- if (!deviceList.isEmpty()) {
- return deviceList.itemAt(0);
- }
+ sp<DeviceDescriptor> deviceDesc = declaredDevices.getDevice(device, address);
+ if (deviceDesc) {
+ return deviceDesc;
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index a253113..8008a7c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -242,12 +242,7 @@
AudioProfileTraits::Collection profiles;
deserializeCollection<AudioProfileTraits>(doc, child, profiles, NULL);
if (profiles.isEmpty()) {
- sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
- ChannelsVector(), SampleRateVector());
- dynamicProfile->setDynamicFormat(true);
- dynamicProfile->setDynamicChannels(true);
- dynamicProfile->setDynamicRate(true);
- profiles.add(dynamicProfile);
+ profiles.add(AudioProfile::createFullDynamic());
}
mixPort->setAudioProfiles(profiles);
@@ -328,12 +323,7 @@
AudioProfileTraits::Collection profiles;
deserializeCollection<AudioProfileTraits>(doc, root, profiles, NULL);
if (profiles.isEmpty()) {
- sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
- ChannelsVector(), SampleRateVector());
- dynamicProfile->setDynamicFormat(true);
- dynamicProfile->setDynamicChannels(true);
- dynamicProfile->setDynamicRate(true);
- profiles.add(dynamicProfile);
+ profiles.add(AudioProfile::createFullDynamic());
}
deviceDesc->setAudioProfiles(profiles);
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 2206526..440a4e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -127,7 +127,7 @@
if (streamType == route->mStreamType && route->isActiveOrChanged()
&& route->mDeviceDescriptor != 0) {
audio_devices_t device = route->mDeviceDescriptor->type();
- if (!availableDevices.getDevicesFromType(device).isEmpty()) {
+ if (!availableDevices.getDevicesFromTypeMask(device).isEmpty()) {
return device;
}
}
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index a75f1cb..9381f1f 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -185,6 +185,9 @@
<!-- Hearing aid Audio HAL -->
<xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration.xml"/>
+
</modules>
<!-- End of Modules section -->
diff --git a/services/audiopolicy/config/msd_audio_policy_configuration.xml b/services/audiopolicy/config/msd_audio_policy_configuration.xml
new file mode 100644
index 0000000..a84117e
--- /dev/null
+++ b/services/audiopolicy/config/msd_audio_policy_configuration.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2017-2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Multi Stream Decoder Audio Policy Configuration file -->
+<module name="msd" halVersion="2.0">
+ <attachedDevices>
+ <item>MS12 Input</item>
+ <item>MS12 Output</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="ms12 input" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="ms12 compressed input" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+ </mixPort>
+ <mixPort name="ms12 output" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="MS12 Input" sources="ms12 input,ms12 compressed input"/>
+ <route type="mix" sink="ms12 output" sources="MS12 Output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 36e0f42..b128a38 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -10,7 +10,6 @@
$(LOCAL_PATH)/include \
frameworks/av/services/audiopolicy/engineconfigurable/include \
frameworks/av/services/audiopolicy/engineconfigurable/interface \
- frameworks/av/services/audiopolicy/utilities/convert \
LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 267996c..30f275f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -313,7 +313,7 @@
audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
audio_devices_t availPrimaryInputDevices =
- availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+ availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());
// TODO: getPrimaryOutput return only devices from first module in
// audio_policy_configuration.xml, hearing aid is not there, but it's
@@ -408,8 +408,7 @@
case STRATEGY_SONIFICATION:
- // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
- // handleIncallSonification().
+ // If incall, just select the STRATEGY_PHONE device
if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
@@ -482,7 +481,7 @@
}
}
availableOutputDevices =
- availableOutputDevices.getDevicesFromType(availableOutputDevicesType);
+ availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
return getDeviceForStrategyInt(
@@ -669,9 +668,8 @@
if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
(availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
- availableDeviceTypes =
- availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
- & ~AUDIO_DEVICE_BIT_IN;
+ availableDeviceTypes = availableInputDevices.getDeviceTypesFromHwModule(
+ primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
}
switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 0d6cfda..b0e6b0f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -31,6 +31,7 @@
#include <inttypes.h>
#include <math.h>
+#include <vector>
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyEngineInstance.h>
@@ -38,6 +39,7 @@
#include <utils/Log.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
+#include <private/android_filesystem_config.h>
#include <soundtrigger/SoundTrigger.h>
#include <system/audio.h>
#include <audio_policy_conf.h>
@@ -80,6 +82,16 @@
AUDIO_FORMAT_AAC_XHE,
};
+// Compressed formats for MSD module, ordered from most preferred to least preferred.
+static const std::vector<audio_format_t> compressedFormatsOrder = {{
+ AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+ AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
+// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
+static const std::vector<audio_channel_mask_t> surroundChannelMasksOrder = {{
+ AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
+
// ----------------------------------------------------------------------------
// AudioPolicyInterface implementation
// ----------------------------------------------------------------------------
@@ -200,31 +212,30 @@
return BAD_VALUE;
}
- // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
- // output is suspended before any tracks are moved to it
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- // outputs must be closed after checkOutputForAllStrategies() is executed
- if (!outputs.isEmpty()) {
- for (audio_io_handle_t output : outputs) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
- // close unused outputs after device disconnection or direct outputs that have been
- // opened by checkOutputsForDevice() to query dynamic parameters
- if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
- (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
- closeOutput(output);
+ checkForDeviceAndOutputChanges([&]() {
+ // outputs must be closed after checkOutputForAllStrategies() is executed
+ if (!outputs.isEmpty()) {
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+ // close unused outputs after device disconnection or direct outputs that have been
+ // opened by checkOutputsForDevice() to query dynamic parameters
+ if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+ (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+ (desc->mDirectOpenCount == 0))) {
+ closeOutput(output);
+ }
}
+ // check A2DP again after closing A2DP output to reset mA2dpSuspended if needed
+ return true;
}
- // check again after closing A2DP output to reset mA2dpSuspended if needed
- checkA2dpSuspend();
- }
+ return false;
+ });
- updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
+ const audio_devices_t msdOutDevice = getMsdAudioOutDeviceTypes();
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
@@ -232,7 +243,8 @@
// do not force device change on duplicated output because if device is 0, it will
// also force a device 0 for the two outputs it is duplicated to which may override
// a valid device selection on those outputs.
- bool force = !desc->isDuplicated()
+ bool force = (msdOutDevice == AUDIO_DEVICE_NONE || msdOutDevice != desc->device())
+ && !desc->isDuplicated()
&& (!device_distinguishes_on_address(device)
// always force when disconnecting (a non-duplicated device)
|| (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
@@ -476,20 +488,15 @@
sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
bool isRx, audio_devices_t device, uint32_t delayMs) {
- struct audio_patch patch;
- patch.num_sources = 1;
- patch.num_sinks = 1;
+ PatchBuilder patchBuilder;
sp<DeviceDescriptor> txSourceDeviceDesc;
if (isRx) {
- fillAudioPortConfigForDevice(mAvailableOutputDevices, device, &patch.sinks[0]);
- fillAudioPortConfigForDevice(
- mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX, &patch.sources[0]);
+ patchBuilder.addSink(findDevice(mAvailableOutputDevices, device)).
+ addSource(findDevice(mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX));
} else {
- txSourceDeviceDesc = fillAudioPortConfigForDevice(
- mAvailableInputDevices, device, &patch.sources[0]);
- fillAudioPortConfigForDevice(
- mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX, &patch.sinks[0]);
+ patchBuilder.addSource(txSourceDeviceDesc = findDevice(mAvailableInputDevices, device)).
+ addSink(findDevice(mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX));
}
audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
@@ -500,9 +507,7 @@
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"%s() %#x device output %d is duplicated", __func__, outputDevice, output);
- outputDesc->toAudioPortConfig(&patch.sources[1]);
- patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
- patch.num_sources = 2;
+ patchBuilder.addSource(outputDesc, { .stream = AUDIO_STREAM_PATCH });
}
if (!isRx) {
@@ -512,38 +517,31 @@
// symmetric to the one in startInput()
for (const auto& activeDesc : mInputs.getActiveInputs()) {
if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
- AudioSessionCollection activeSessions =
- activeDesc->getAudioSessions(true /*activeOnly*/);
- for (size_t j = 0; j < activeSessions.size(); j++) {
- audio_session_t activeSession = activeSessions.keyAt(j);
- stopInput(activeDesc->mIoHandle, activeSession);
- releaseInput(activeDesc->mIoHandle, activeSession);
- }
+ closeSessions(activeDesc, true /*activeOnly*/);
}
}
}
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- status_t status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
+ status_t status = mpClientInterface->createAudioPatch(
+ patchBuilder.patch(), &afPatchHandle, delayMs);
ALOGW_IF(status != NO_ERROR,
"%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
sp<AudioPatch> audioPatch;
if (status == NO_ERROR) {
- audioPatch = new AudioPatch(&patch, mUidCached);
+ audioPatch = new AudioPatch(patchBuilder.patch(), mUidCached);
audioPatch->mAfPatchHandle = afPatchHandle;
audioPatch->mUid = mUidCached;
}
return audioPatch;
}
-sp<DeviceDescriptor> AudioPolicyManager::fillAudioPortConfigForDevice(
- const DeviceVector& devices, audio_devices_t device, audio_port_config *config) {
- DeviceVector deviceList = devices.getDevicesFromType(device);
+sp<DeviceDescriptor> AudioPolicyManager::findDevice(
+ const DeviceVector& devices, audio_devices_t device) const {
+ DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
ALOG_ASSERT(!deviceList.isEmpty(),
"%s() selected device type %#x is not in devices list", __func__, device);
- sp<DeviceDescriptor> deviceDesc = deviceList.itemAt(0);
- deviceDesc->toAudioPortConfig(config);
- return deviceDesc;
+ return deviceList.itemAt(0);
}
void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -557,14 +555,8 @@
return;
}
/// Opens: can these lines be executed after the switch of volume curves???
- // if leaving call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, false, true);
- }
-
// force reevaluating accessibility routing when call stops
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -577,9 +569,7 @@
|| (is_state_in_call(state) && (state != oldState)));
// check for device and output changes triggered by new phone state
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- updateDevicesAndOutputs();
+ checkForDeviceAndOutputChanges();
int delayMs = 0;
if (isStateInCall(state)) {
@@ -643,14 +633,8 @@
}
}
- // if entering in call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, true, true);
- }
-
// force reevaluating accessibility routing when call starts
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -685,9 +669,7 @@
(usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
// check for device and output changes triggered by new force usage
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- updateDevicesAndOutputs();
+ checkForDeviceAndOutputChanges();
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
@@ -803,6 +785,12 @@
audio_port_handle_t *portId)
{
audio_attributes_t attributes;
+ DeviceVector outputDevices;
+ routing_strategy strategy;
+ audio_devices_t device;
+ audio_port_handle_t requestedDeviceId = *selectedDeviceId;
+ audio_devices_t msdDevice = getMsdAudioOutDeviceTypes();
+
if (attr != NULL) {
if (!isValidAttributes(attr)) {
ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
@@ -819,43 +807,57 @@
stream_type_to_audio_attributes(*stream, &attributes);
}
- // TODO: check for existing client for this port ID
- if (*portId == AUDIO_PORT_HANDLE_NONE) {
- *portId = AudioPort::getNextUniqueId();
- }
-
- sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
- ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
- if (!audio_has_proportional_frames(config->format)) {
- return BAD_VALUE;
- }
- *stream = streamTypefromAttributesInt(&attributes);
- *output = desc->mIoHandle;
- ALOGV("getOutputForAttr() returns output %d", *output);
- return NO_ERROR;
- }
- if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
- ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
- return BAD_VALUE;
- }
-
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
session, *selectedDeviceId);
- *stream = streamTypefromAttributesInt(&attributes);
+ // TODO: check for existing client for this port ID
+ if (*portId == AUDIO_PORT_HANDLE_NONE) {
+ *portId = AudioPort::getNextUniqueId();
+ }
- // Explicit routing?
+ // First check for explicit routing (eg. setPreferredDevice)
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+ } else {
+ // If no explicit route, is there a matching dynamic policy that applies?
+ sp<SwAudioOutputDescriptor> desc;
+ if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+ ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+ if (!audio_has_proportional_frames(config->format)) {
+ return BAD_VALUE;
+ }
+ *stream = streamTypefromAttributesInt(&attributes);
+ *output = desc->mIoHandle;
+ ALOGV("getOutputForAttr() returns output %d", *output);
+ goto exit;
+ }
+
+ // Virtual sources must always be dynamically or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
}
+
+ // Virtual sources must always be dynamicaly or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
+
+ *stream = streamTypefromAttributesInt(&attributes);
+
+ // TODO: Should this happen only if an explicit route is active?
+ // the previous code structure meant that this would always happen which
+ // would appear to result in adding a null deviceDesc when not using an
+ // explicit route. Is that the intended and necessary behavior?
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
- routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
- audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+ strategy = (routing_strategy) getStrategyForAttr(&attributes);
+ device = getDeviceForStrategy(strategy, false /*fromCache*/);
if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
@@ -880,17 +882,41 @@
"flags %#x",
device, config->sample_rate, config->format, config->channel_mask, *flags);
- *output = getOutputForDevice(device, session, *stream, config, flags);
+ *output = AUDIO_IO_HANDLE_NONE;
+ if (msdDevice != AUDIO_DEVICE_NONE) {
+ *output = getOutputForDevice(msdDevice, session, *stream, config, flags);
+ if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+ ALOGV("%s() Using MSD device 0x%x instead of device 0x%x",
+ __func__, msdDevice, device);
+ device = msdDevice;
+ } else {
+ *output = AUDIO_IO_HANDLE_NONE;
+ }
+ }
+ if (*output == AUDIO_IO_HANDLE_NONE) {
+ *output = getOutputForDevice(device, session, *stream, config, flags);
+ }
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+ outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
*selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
- ALOGV(" getOutputForAttr() returns output %d selectedDeviceId %d", *output, *selectedDeviceId);
+exit:
+ audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
+ .format = config->format,
+ .channel_mask = config->channel_mask };
+ sp<TrackClientDescriptor> clientDesc =
+ new TrackClientDescriptor(*portId, uid, session,
+ attributes, clientConfig, requestedDeviceId, *stream, *flags);
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
+ outputDesc->clients().emplace(*portId, clientDesc);
+
+ ALOGV(" getOutputForAttr() returns output %d selectedDeviceId %d for port ID %d",
+ *output, *selectedDeviceId, *portId);
return NO_ERROR;
}
@@ -986,7 +1012,7 @@
sp<SwAudioOutputDescriptor> outputDesc =
new SwAudioOutputDescriptor(profile, mpClientInterface);
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+ DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
: String8("");
@@ -1052,6 +1078,164 @@
return output;
}
+sp<DeviceDescriptor> AudioPolicyManager::getMsdAudioInDevice() const {
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ if (msdModule != 0) {
+ DeviceVector msdInputDevices = mAvailableInputDevices.getDevicesFromHwModule(
+ msdModule->getHandle());
+ if (!msdInputDevices.isEmpty()) return msdInputDevices.itemAt(0);
+ }
+ return 0;
+}
+
+audio_devices_t AudioPolicyManager::getMsdAudioOutDeviceTypes() const {
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ if (msdModule != 0) {
+ return mAvailableOutputDevices.getDeviceTypesFromHwModule(msdModule->getHandle());
+ }
+ return AUDIO_DEVICE_NONE;
+}
+
+const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
+ AudioPatchCollection msdPatches;
+ audio_module_handle_t msdModuleHandle = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_MSD)->getHandle();
+ if (msdModuleHandle == AUDIO_MODULE_HANDLE_NONE) return msdPatches;
+ for (size_t i = 0; i < mAudioPatches.size(); ++i) {
+ sp<AudioPatch> patch = mAudioPatches.valueAt(i);
+ for (size_t j = 0; j < patch->mPatch.num_sources; ++j) {
+ const struct audio_port_config *source = &patch->mPatch.sources[j];
+ if (source->type == AUDIO_PORT_TYPE_DEVICE &&
+ source->ext.device.hw_module == msdModuleHandle) {
+ msdPatches.addAudioPatch(patch->mHandle, patch);
+ }
+ }
+ }
+ return msdPatches;
+}
+
+status_t AudioPolicyManager::getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+ bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
+{
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ if (msdModule == nullptr) {
+ ALOGE("%s() unable to get MSD module", __func__);
+ return NO_INIT;
+ }
+ sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+ if (deviceModule == nullptr) {
+ ALOGE("%s() unable to get module for %#x", __func__, outputDevice);
+ return NO_INIT;
+ }
+ const InputProfileCollection &inputProfiles = msdModule->getInputProfiles();
+ if (inputProfiles.isEmpty()) {
+ ALOGE("%s() no input profiles for MSD module", __func__);
+ return NO_INIT;
+ }
+ const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles();
+ if (outputProfiles.isEmpty()) {
+ ALOGE("%s() no output profiles for device %#x", __func__, outputDevice);
+ return NO_INIT;
+ }
+ AudioProfileVector msdProfiles;
+ // Each IOProfile represents a MixPort from audio_policy_configuration.xml
+ for (const auto &inProfile : inputProfiles) {
+ if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
+ msdProfiles.appendVector(inProfile->getAudioProfiles());
+ }
+ }
+ AudioProfileVector deviceProfiles;
+ for (const auto &outProfile : outputProfiles) {
+ if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
+ deviceProfiles.appendVector(outProfile->getAudioProfiles());
+ }
+ }
+ struct audio_config_base bestSinkConfig;
+ status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles,
+ compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
+ &bestSinkConfig);
+ if (result != NO_ERROR) {
+ ALOGD("%s() no matching profiles found for device: %#x, hwAvSync: %d",
+ __func__, outputDevice, hwAvSync);
+ return result;
+ }
+ sinkConfig->sample_rate = bestSinkConfig.sample_rate;
+ sinkConfig->channel_mask = bestSinkConfig.channel_mask;
+ sinkConfig->format = bestSinkConfig.format;
+ // For encoded streams force direct flag to prevent downstream mixing.
+ sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+ sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_DIRECT);
+ sourceConfig->sample_rate = bestSinkConfig.sample_rate;
+ // Specify exact channel mask to prevent guessing by bit count in PatchPanel.
+ sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
+ sourceConfig->format = bestSinkConfig.format;
+ // Copy input stream directly without any processing (e.g. resampling).
+ sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+ sourceConfig->flags.input | AUDIO_INPUT_FLAG_DIRECT);
+ if (hwAvSync) {
+ sinkConfig->flags.output = static_cast<audio_output_flags_t>(
+ sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+ sourceConfig->flags.input = static_cast<audio_input_flags_t>(
+ sourceConfig->flags.input | AUDIO_INPUT_FLAG_HW_AV_SYNC);
+ }
+ const unsigned int config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE |
+ AUDIO_PORT_CONFIG_CHANNEL_MASK | AUDIO_PORT_CONFIG_FORMAT | AUDIO_PORT_CONFIG_FLAGS;
+ sinkConfig->config_mask |= config_mask;
+ sourceConfig->config_mask |= config_mask;
+ return NO_ERROR;
+}
+
+PatchBuilder AudioPolicyManager::buildMsdPatch(audio_devices_t outputDevice) const
+{
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(getMsdAudioInDevice()).
+ addSink(findDevice(mAvailableOutputDevices, outputDevice));
+ audio_port_config sourceConfig = patchBuilder.patch()->sources[0];
+ audio_port_config sinkConfig = patchBuilder.patch()->sinks[0];
+ // TODO: Figure out whether MSD module has HW_AV_SYNC flag set in the AP config file.
+ // For now, we just forcefully try with HwAvSync first.
+ status_t res = getBestMsdAudioProfileFor(outputDevice, true /*hwAvSync*/,
+ &sourceConfig, &sinkConfig) == NO_ERROR ? NO_ERROR :
+ getBestMsdAudioProfileFor(
+ outputDevice, false /*hwAvSync*/, &sourceConfig, &sinkConfig);
+ if (res == NO_ERROR) {
+ // Found a matching profile for encoded audio. Re-create PatchBuilder with this config.
+ return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig);
+ }
+ ALOGV("%s() no matching profile found. Fall through to default PCM patch"
+ " supporting PCM format conversion.", __func__);
+ return patchBuilder;
+}
+
+status_t AudioPolicyManager::setMsdPatch(audio_devices_t outputDevice) {
+ ALOGV("%s() for outputDevice %#x", __func__, outputDevice);
+ if (outputDevice == AUDIO_DEVICE_NONE) {
+ // Use media strategy for unspecified output device. This should only
+ // occur on checkForDeviceAndOutputChanges(). Device connection events may
+ // therefore invalidate explicit routing requests.
+ outputDevice = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+ }
+ PatchBuilder patchBuilder = buildMsdPatch(outputDevice);
+ const struct audio_patch* patch = patchBuilder.patch();
+ const AudioPatchCollection msdPatches = getMsdPatches();
+ if (!msdPatches.isEmpty()) {
+ LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1,
+ "The current MSD prototype only supports one output patch");
+ sp<AudioPatch> currentPatch = msdPatches.valueAt(0);
+ if (audio_patches_are_equal(&currentPatch->mPatch, patch)) {
+ return NO_ERROR;
+ }
+ releaseAudioPatch(currentPatch->mHandle, mUidCached);
+ }
+ status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
+ patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
+ ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
+ ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
+ "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__, outputDevice,
+ patch->sources[0].format, patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+ return status;
+}
+
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format)
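The MSD patch setup above relies on a preference-ordered list (compressedFormatsOrder) to pick a configuration supported by both the MSD input profiles and the target device's output profiles. A small sketch of that selection idea, using plain std:: containers and string format names as stand-ins (the helper name pickBestCommonFormat and the HDMI example data are hypothetical; the real code goes through AudioProfileVector::findBestMatchingOutputConfig):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Most preferred first, mirroring compressedFormatsOrder in AudioPolicyManager.cpp.
static const std::vector<std::string> kFormatPreference = {
    "MAT_2_1", "MAT_2_0", "E_AC3", "AC3", "PCM_16_BIT"};

// Return the first preferred format advertised by both sides, or "" if none match.
static std::string pickBestCommonFormat(const std::vector<std::string>& msdFormats,
                                        const std::vector<std::string>& deviceFormats) {
    for (const auto& fmt : kFormatPreference) {
        const bool msdHasIt =
            std::find(msdFormats.begin(), msdFormats.end(), fmt) != msdFormats.end();
        const bool devHasIt =
            std::find(deviceFormats.begin(), deviceFormats.end(), fmt) != deviceFormats.end();
        if (msdHasIt && devHasIt) return fmt;
    }
    return {};
}

int main() {
    // MSD formats as declared in msd_audio_policy_configuration.xml; the HDMI sink is hypothetical.
    const std::vector<std::string> msd = {"AC3", "E_AC3", "AC4", "PCM_16_BIT"};
    const std::vector<std::string> hdmi = {"PCM_16_BIT", "AC3"};
    printf("negotiated format: %s\n", pickBestCommonFormat(msd, hdmi).c_str());  // -> AC3
    return 0;
}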
@@ -1132,19 +1316,21 @@
return outputs[0];
}
-status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyManager::startOutput(audio_port_handle_t portId)
{
- ALOGV("startOutput() output %d, stream %d, session %d",
- output, stream, session);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- ALOGW("startOutput() unknown output %d", output);
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+ if (outputDesc == 0) {
+ ALOGW("startOutput() no output for client %d", portId);
return BAD_VALUE;
}
+ sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+ audio_stream_type_t stream = client->stream();
+ audio_session_t session = client->session();
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+ ALOGV("startOutput() output %d, stream %d, session %d",
+ outputDesc->mIoHandle, stream, session);
status_t status = outputDesc->start();
if (status != NO_ERROR) {
@@ -1168,7 +1354,7 @@
} else if (mOutputRoutes.getAndClearRouteChanged(session)) {
newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
if (newDevice != outputDesc->device()) {
- checkStrategyRoute(getStrategy(stream), output);
+ checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
}
} else {
newDevice = AUDIO_DEVICE_NONE;
@@ -1295,11 +1481,6 @@
const uint32_t muteWaitMs =
setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, true, false);
- }
-
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1333,27 +1514,23 @@
setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
}
- if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
- setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
- }
-
return NO_ERROR;
}
-
-status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyManager::stopOutput(audio_port_handle_t portId)
{
- ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- ALOGW("stopOutput() unknown output %d", output);
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+ if (outputDesc == 0) {
+ ALOGW("stopOutput() no output for client %d", portId);
return BAD_VALUE;
}
+ sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+ audio_stream_type_t stream = client->stream();
+ audio_session_t session = client->session();
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+ ALOGV("stopOutput() output %d, stream %d, session %d", outputDesc->mIoHandle, stream, session);
if (outputDesc->mRefCount[stream] == 1) {
// Automatically disable the remote submix input when output is stopped on a
@@ -1394,11 +1571,6 @@
// always handle stream stop, check which stream type is stopping
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, false, false);
- }
-
if (outputDesc->mRefCount[stream] > 0) {
// decrement usage count of this stream on the output
outputDesc->changeRefCount(stream, -1);
@@ -1455,32 +1627,35 @@
}
}
-void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream __unused,
- audio_session_t session __unused)
+void AudioPolicyManager::releaseOutput(audio_port_handle_t portId)
{
- ALOGV("releaseOutput() %d", output);
- ssize_t index = mOutputs.indexOfKey(output);
- if (index < 0) {
- ALOGW("releaseOutput() releasing unknown output %d", output);
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId);
+ if (outputDesc == 0) {
+ ALOGW("releaseOutput() no output for client %d", portId);
return;
}
+ sp<TrackClientDescriptor> client = outputDesc->clients()[portId];
+ audio_session_t session = client->session();
+
+ ALOGV("releaseOutput() %d", outputDesc->mIoHandle);
// Routing
mOutputRoutes.removeRoute(session);
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
- if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
- if (desc->mDirectOpenCount <= 0) {
+ if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if (outputDesc->mDirectOpenCount <= 0) {
ALOGW("releaseOutput() invalid open count %d for output %d",
- desc->mDirectOpenCount, output);
+ outputDesc->mDirectOpenCount, outputDesc->mIoHandle);
return;
}
- if (--desc->mDirectOpenCount == 0) {
- closeOutput(output);
+ if (--outputDesc->mDirectOpenCount == 0) {
+ closeOutput(outputDesc->mIoHandle);
mpClientInterface->onAudioPortListUpdate();
}
}
+ outputDesc->clients().erase(portId);
}
@@ -1505,6 +1680,9 @@
audio_source_t inputSource = attr->source;
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
+ sp<AudioInputDescriptor> inputDesc;
+ sp<RecordClientDescriptor> clientDesc;
+ audio_port_handle_t requestedDeviceId = *selectedDeviceId;
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
@@ -1555,12 +1733,12 @@
if (*portId == AUDIO_PORT_HANDLE_NONE) {
*portId = AudioPort::getNextUniqueId();
}
- inputDevices = mAvailableInputDevices.getDevicesFromType(inputDesc->mDevice);
+ inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(inputDesc->mDevice);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
- return NO_ERROR;
+ goto exit;
}
*input = AUDIO_IO_HANDLE_NONE;
@@ -1622,12 +1800,18 @@
goto error;
}
- inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+ inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
- ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d",
- *input, *inputType, *selectedDeviceId);
+exit:
+ clientDesc = new RecordClientDescriptor(*portId, uid, session,
+ *attr, *config, requestedDeviceId, inputSource, flags);
+ inputDesc = mInputs.valueFor(*input);
+ inputDesc->clients().emplace(*portId, clientDesc);
+
+ ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d for port ID %d",
+ *input, *inputType, *selectedDeviceId, *portId);
return NO_ERROR;
@@ -1709,74 +1893,7 @@
config->channel_mask,
flags,
uid,
- isSoundTrigger,
- policyMix, mpClientInterface);
-
-// FIXME: disable concurrent capture until UI is ready
-#if 0
- // reuse an open input if possible
- sp<AudioInputDescriptor> reusedInputDesc;
- for (size_t i = 0; i < mInputs.size(); i++) {
- sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
- // reuse input if:
- // - it shares the same profile
- // AND
- // - it is not a reroute submix input
- // AND
- // - it is: not used for sound trigger
- // OR
- // used for sound trigger and all clients use the same session ID
- //
- if ((profile == desc->mProfile) &&
- (isSoundTrigger == desc->isSoundTrigger()) &&
- !is_virtual_input_device(device)) {
-
- sp<AudioSession> as = desc->getAudioSession(session);
- if (as != 0) {
- // do not allow unmatching properties on same session
- if (as->matches(audioSession)) {
- as->changeOpenCount(1);
- } else {
- ALOGW("getInputForDevice() record with different attributes"
- " exists for session %d", session);
- continue;
- }
- } else if (isSoundTrigger) {
- continue;
- }
-
- // Reuse the already opened input stream on this profile if:
- // - the new capture source is background OR
- // - the path requested configurations match OR
- // - the new source priority is less than the highest source priority on this input
- // If the input stream cannot be reused, close it before opening a new stream
- // on the same profile for the new client so that the requested path configuration
- // can be selected.
- if (!isConcurrentSource(inputSource) &&
- ((desc->mSamplingRate != samplingRate ||
- desc->mChannelMask != config->channel_mask ||
- !audio_formats_match(desc->mFormat, config->format)) &&
- (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
- source_priority(inputSource)))) {
- reusedInputDesc = desc;
- continue;
- } else {
- desc->addAudioSession(session, audioSession);
- ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
- return mInputs.keyAt(i);
- }
- }
- }
-
- if (reusedInputDesc != 0) {
- AudioSessionCollection sessions = reusedInputDesc->getAudioSessions(false /*activeOnly*/);
- for (size_t j = 0; j < sessions.size(); j++) {
- audio_session_t currentSession = sessions.keyAt(j);
- stopInput(reusedInputDesc->mIoHandle, currentSession);
- releaseInput(reusedInputDesc->mIoHandle, currentSession);
- }
- }
-#endif
+ isSoundTrigger);
if (!profile->canOpenNewIo()) {
return AUDIO_IO_HANDLE_NONE;
@@ -1790,7 +1907,7 @@
lConfig.format = profileFormat;
if (address == "") {
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+ DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
// the inputs vector must be of size >= 1, but we don't want to crash here
address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
}
@@ -1897,44 +2014,32 @@
}
-status_t AudioPolicyManager::startInput(audio_io_handle_t input,
- audio_session_t session,
+status_t AudioPolicyManager::startInput(audio_port_handle_t portId,
bool silenced,
concurrency_type__mask_t *concurrency)
{
+ *concurrency = API_INPUT_CONCURRENCY_NONE;
+
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+ if (inputDesc == 0) {
+ ALOGW("startInput() no input for client %d", portId);
+ return BAD_VALUE;
+ }
+ sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+ audio_session_t session = client->session();
+ audio_io_handle_t input = inputDesc->mIoHandle;
ALOGV("AudioPolicyManager::startInput(input:%d, session:%d, silenced:%d, concurrency:%d)",
input, session, silenced, *concurrency);
- *concurrency = API_INPUT_CONCURRENCY_NONE;
-
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- ALOGW("startInput() unknown input %d", input);
- return BAD_VALUE;
- }
- sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("startInput() unknown session %d on input %d", session, input);
return BAD_VALUE;
}
-// FIXME: disable concurrent capture until UI is ready
-#if 0
- if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
- ALOGW("startInput(%d) failed: other input already started", input);
- return INVALID_OPERATION;
- }
-
- if (isInCall()) {
- *concurrency |= API_INPUT_CONCURRENCY_CALL;
- }
- if (mInputs.activeInputsCountOnDevices() != 0) {
- *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
- }
-#else
if (!is_virtual_input_device(inputDesc->mDevice)) {
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
@@ -1959,11 +2064,8 @@
true /*activeOnly*/);
sp<AudioSession> activeSession = activeSessions.valueAt(0);
if (activeSession->isSilenced()) {
- audio_io_handle_t activeInput = activeDesc->mIoHandle;
- audio_session_t activeSessionId = activeSession->session();
- stopInput(activeInput, activeSessionId);
- releaseInput(activeInput, activeSessionId);
- ALOGV("startInput(%d) stopping silenced input %d", input, activeInput);
+ closeSession(activeDesc, activeSession);
+ ALOGV("startInput() session %d stopping silenced session %d", session, activeSession->session());
activeInputs = mInputs.getActiveInputs();
}
}
@@ -2017,27 +2119,24 @@
if (activeSource == AUDIO_SOURCE_HOTWORD) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
- audio_session_t activeSession = activeSessions.keyAt(0);
- audio_io_handle_t activeHandle = activeDesc->mIoHandle;
+ sp<AudioSession> activeSession = activeSessions[0];
SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
*concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
- sessions.add(activeSession);
+ sessions.add(activeSession->session());
inputDesc->setPreemptedSessions(sessions);
- stopInput(activeHandle, activeSession);
- releaseInput(activeHandle, activeSession);
+ closeSession(inputDesc, activeSession);
ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
input, activeDesc->mIoHandle);
}
}
}
-#endif
// Make sure we start with the correct silence state
audioSession->setSilenced(silenced);
// increment activity count before calling getNewInputDevice() below as only active sessions
// are considered for device selection
- audioSession->changeActiveCount(1);
+ inputDesc->changeRefCount(session, 1);
// Routing?
mInputRoutes.incRouteActivity(session);
@@ -2051,7 +2150,7 @@
status_t status = inputDesc->start();
if (status != NO_ERROR) {
mInputRoutes.decRouteActivity(session);
- audioSession->changeActiveCount(-1);
+ inputDesc->changeRefCount(session, -1);
return status;
}
@@ -2093,29 +2192,29 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
- audio_session_t session)
+status_t AudioPolicyManager::stopInput(audio_port_handle_t portId)
{
- ALOGV("stopInput() input %d", input);
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- ALOGW("stopInput() unknown input %d", input);
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+ if (inputDesc == 0) {
+ ALOGW("stopInput() no input for client %d", portId);
return BAD_VALUE;
}
- sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+ sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+ audio_session_t session = client->session();
+ audio_io_handle_t input = inputDesc->mIoHandle;
+
+ ALOGV("stopInput() input %d", input);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
- if (index < 0) {
- ALOGW("stopInput() unknown session %d on input %d", session, input);
- return BAD_VALUE;
- }
if (audioSession->activeCount() == 0) {
ALOGW("stopInput() input %d already stopped", input);
return INVALID_OPERATION;
}
- audioSession->changeActiveCount(-1);
+ inputDesc->changeRefCount(session, -1);
// Routing?
mInputRoutes.decRouteActivity(session);
@@ -2164,22 +2263,24 @@
return NO_ERROR;
}
-void AudioPolicyManager::releaseInput(audio_io_handle_t input,
- audio_session_t session)
+void AudioPolicyManager::releaseInput(audio_port_handle_t portId)
{
- ALOGV("releaseInput() %d", input);
- ssize_t index = mInputs.indexOfKey(input);
- if (index < 0) {
- ALOGW("releaseInput() releasing unknown input %d", input);
+ ALOGV("%s portId %d", __FUNCTION__, portId);
+
+ sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
+ if (inputDesc == 0) {
+ ALOGW("releaseInput() no input for client %d", portId);
return;
}
+ sp<RecordClientDescriptor> client = inputDesc->clients()[portId];
+ audio_session_t session = client->session();
+ audio_io_handle_t input = inputDesc->mIoHandle;
+
+ ALOGV("releaseInput() %d", input);
// Routing
mInputRoutes.removeRoute(session);
- sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
- ALOG_ASSERT(inputDesc != 0);
-
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("releaseInput() unknown session %d on input %d", session, input);
@@ -2202,10 +2303,31 @@
}
closeInput(input);
+ inputDesc->clients().erase(portId);
mpClientInterface->onAudioPortListUpdate();
ALOGV("releaseInput() exit");
}
+void AudioPolicyManager::closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly)
+{
+ AudioSessionCollection sessions = input->getAudioSessions(activeOnly /*activeOnly*/);
+ for (size_t i = 0; i < sessions.size(); i++) {
+ closeSession(input, sessions[i]);
+ }
+}
+
+void AudioPolicyManager::closeSession(const sp<AudioInputDescriptor>& input,
+ const sp<AudioSession>& session)
+{
+ RecordClientVector clients = input->getClientsForSession(session->session());
+
+ for (const auto& client : clients) {
+ stopInput(client->portId());
+ releaseInput(client->portId());
+ }
+}
+
+
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
@@ -2289,11 +2411,10 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
continue;
}
- if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
- (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+ if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2311,13 +2432,15 @@
applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
stream, curStreamDevice);
}
-
+ // rescale index before applying to curStream as ranges may be different for
+ // stream and curStream
+ int idx = rescaleVolumeIndex(index, stream, (audio_stream_type_t)curStream);
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
status_t volStatus =
- checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
+ checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
(stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
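The rescaling step added above matters because different streams can have different index ranges. A rough sketch of proportional index rescaling under that assumption (the helper name and the example ranges are illustrative; the real rescaleVolumeIndex derives the ranges from the stream volume curves):

#include <cstdio>

// Map a volume index from one stream's [min, max] range onto another's.
static int rescaleIndex(int idx, int srcMin, int srcMax, int dstMin, int dstMax) {
    if (srcMax <= srcMin) return dstMin;  // degenerate source range
    const float pos = static_cast<float>(idx - srcMin) / static_cast<float>(srcMax - srcMin);
    return dstMin + static_cast<int>(pos * (dstMax - dstMin) + 0.5f);  // round to nearest step
}

int main() {
    // e.g. a stream with 0..25 steps mapped onto one with 0..7 steps (hypothetical ranges).
    printf("rescaled index: %d\n", rescaleIndex(20, 0, 25, 0, 7));  // -> 6
    return 0;
}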
@@ -2637,42 +2760,24 @@
status_t AudioPolicyManager::dump(int fd)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
-
- snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
- result.append(buffer);
-
- snprintf(buffer, SIZE, " Primary Output: %d\n",
+ result.appendFormat("\nAudioPolicyManager Dump: %p\n", this);
+ result.appendFormat(" Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
- result.append(buffer);
std::string stateLiteral;
AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
- snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for communications %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
- result.append(buffer);
- snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
- result.append(buffer);
- snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
- result.append(buffer);
-
+ result.appendFormat(" Phone state: %s\n", stateLiteral.c_str());
+ const char* forceUses[AUDIO_POLICY_FORCE_USE_CNT] = {
+ "communications", "media", "record", "dock", "system",
+ "HDMI system audio", "encoded surround output", "vibrate ringing" };
+ for (audio_policy_force_use_t i = AUDIO_POLICY_FORCE_FOR_COMMUNICATION;
+ i < AUDIO_POLICY_FORCE_USE_CNT; i = (audio_policy_force_use_t)((int)i + 1)) {
+ result.appendFormat(" Force use for %s: %d\n",
+ forceUses[i], mEngine->getForceUse(i));
+ }
+ result.appendFormat(" TTS output %savailable\n", mTtsOutputAvailable ? "" : "not ");
+ result.appendFormat(" Master mono: %s\n", mMasterMono ? "on" : "off");
+ result.appendFormat(" Config source: %s\n", getConfig().getSource().c_str());
write(fd, result.string(), result.size());
mAvailableOutputDevices.dump(fd, String8("Available output"));
@@ -2684,6 +2789,7 @@
mEffects.dump(fd);
mAudioPatches.dump(fd);
mPolicyMixes.dump(fd);
+ mAudioSources.dump(fd);
return NO_ERROR;
}
@@ -2868,8 +2974,7 @@
}
ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch)) {
return BAD_VALUE;
}
// only one source per audio patch supported for now
@@ -3088,28 +3193,8 @@
}
// TODO: check from routing capabilities in config file and other conflicting patches
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&newPatch,
- &afPatchHandle,
- 0);
- ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
- status, afPatchHandle);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&newPatch, uid);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = newPatch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- *handle = patchDesc->mHandle;
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- } else {
+ status_t status = installPatch(__func__, index, handle, &newPatch, 0, uid, &patchDesc);
+ if (status != NO_ERROR) {
ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
status);
return INVALID_OPERATION;
@@ -3242,10 +3327,10 @@
return BAD_VALUE;
}
- struct audio_port_config backupConfig;
+ struct audio_port_config backupConfig = {};
status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig);
if (status == NO_ERROR) {
- struct audio_port_config newConfig;
+ struct audio_port_config newConfig = {};
audioPortConfig->toAudioPortConfig(&newConfig, config);
status = mpClientInterface->setAudioPortConfig(&newConfig, 0);
}
@@ -3347,8 +3432,8 @@
void AudioPolicyManager::clearAudioSources(uid_t uid)
{
for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--) {
- sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
- if (sourceDesc->mUid == uid) {
+ sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+ if (sourceDesc->uid() == uid) {
stopAudioSource(mAudioSources.keyAt(i));
}
}
@@ -3366,20 +3451,23 @@
}
status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_patch_handle_t *handle,
- uid_t uid)
+ const audio_attributes_t *attributes,
+ audio_port_handle_t *portId,
+ uid_t uid)
{
- ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
- if (source == NULL || attributes == NULL || handle == NULL) {
+ ALOGV("%s", __FUNCTION__);
+ if (portId != NULL) *portId = AUDIO_PORT_HANDLE_NONE;
+
+ if (source == NULL || attributes == NULL || portId == NULL) {
+ ALOGW("%s invalid argument: source %p attributes %p handle %p",
+ __FUNCTION__, source, attributes, portId);
return BAD_VALUE;
}
- *handle = AUDIO_PATCH_HANDLE_NONE;
-
if (source->role != AUDIO_PORT_ROLE_SOURCE ||
source->type != AUDIO_PORT_TYPE_DEVICE) {
- ALOGV("%s INVALID_OPERATION source->role %d source->type %d", __FUNCTION__, source->role, source->type);
+ ALOGW("%s INVALID_OPERATION source->role %d source->type %d",
+ __FUNCTION__, source->role, source->type);
return INVALID_OPERATION;
}
@@ -3387,41 +3475,43 @@
mAvailableInputDevices.getDevice(source->ext.device.type,
String8(source->ext.device.address));
if (srcDeviceDesc == 0) {
- ALOGV("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
+ ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
return BAD_VALUE;
}
- sp<AudioSourceDescriptor> sourceDesc =
- new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
- struct audio_patch dummyPatch;
+ *portId = AudioPort::getNextUniqueId();
+
+ struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
- sourceDesc->mPatchDesc = patchDesc;
+
+ sp<SourceClientDescriptor> sourceDesc =
+ new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDeviceDesc,
+ streamTypefromAttributesInt(attributes));
status_t status = connectAudioSource(sourceDesc);
if (status == NO_ERROR) {
- mAudioSources.add(sourceDesc->getHandle(), sourceDesc);
- *handle = sourceDesc->getHandle();
+ mAudioSources.add(*portId, sourceDesc);
}
return status;
}
-status_t AudioPolicyManager::connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
{
- ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+ ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
// make sure we only have one patch per source.
disconnectAudioSource(sourceDesc);
- routing_strategy strategy = (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
- audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
- sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->mDevice;
+ audio_attributes_t attributes = sourceDesc->attributes();
+ routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
+ audio_stream_type_t stream = sourceDesc->stream();
+ sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->srcDevice();
audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
sp<DeviceDescriptor> sinkDeviceDesc =
mAvailableOutputDevices.getDevice(sinkDevice, String8(""));
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- struct audio_patch *patch = &sourceDesc->mPatchDesc->mPatch;
if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
@@ -3453,16 +3543,14 @@
// be connected as well as the stream type for volume control
// - the sink is defined by whatever output device is currently selected for the output
// though which this patch is routed.
- patch->num_sinks = 0;
- patch->num_sources = 2;
- srcDeviceDesc->toAudioPortConfig(&patch->sources[0], NULL);
- outputDesc->toAudioPortConfig(&patch->sources[1], NULL);
- patch->sources[1].ext.mix.usecase.stream = stream;
- status = mpClientInterface->createAudioPatch(patch,
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(srcDeviceDesc).addSource(outputDesc, { .stream = stream });
+ status = mpClientInterface->createAudioPatch(patchBuilder.patch(),
&afPatchHandle,
0);
ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
status, afPatchHandle);
+ sourceDesc->patchDesc()->mPatch = *patchBuilder.patch();
if (status != NO_ERROR) {
ALOGW("%s patch panel could not connect device patch, error %d",
__FUNCTION__, status);
@@ -3472,32 +3560,32 @@
status = startSource(outputDesc, stream, sinkDevice, NULL, &delayMs);
if (status != NO_ERROR) {
- mpClientInterface->releaseAudioPatch(sourceDesc->mPatchDesc->mAfPatchHandle, 0);
+ mpClientInterface->releaseAudioPatch(sourceDesc->patchDesc()->mAfPatchHandle, 0);
return status;
}
- sourceDesc->mSwOutput = outputDesc;
+ sourceDesc->setSwOutput(outputDesc);
if (delayMs != 0) {
usleep(delayMs * 1000);
}
}
- sourceDesc->mPatchDesc->mAfPatchHandle = afPatchHandle;
- addAudioPatch(sourceDesc->mPatchDesc->mHandle, sourceDesc->mPatchDesc);
+ sourceDesc->patchDesc()->mAfPatchHandle = afPatchHandle;
+ addAudioPatch(sourceDesc->patchDesc()->mHandle, sourceDesc->patchDesc());
return NO_ERROR;
}
-status_t AudioPolicyManager::stopAudioSource(audio_patch_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_port_handle_t portId)
{
- sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
- ALOGV("%s handle %d", __FUNCTION__, handle);
+ sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueFor(portId);
+ ALOGV("%s port ID %d", __FUNCTION__, portId);
if (sourceDesc == 0) {
- ALOGW("%s unknown source for handle %d", __FUNCTION__, handle);
+ ALOGW("%s unknown source for port ID %d", __FUNCTION__, portId);
return BAD_VALUE;
}
status_t status = disconnectAudioSource(sourceDesc);
- mAudioSources.removeItem(handle);
+ mAudioSources.removeItem(portId);
return status;
}
@@ -3625,7 +3713,7 @@
}
}
// Open an output to query dynamic parameters.
- DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
String8 address = hdmiOutputDevices[i]->mAddress;
@@ -3751,7 +3839,7 @@
sp<SwAudioOutputDescriptor> outputDesc;
bool profileUpdated = false;
- DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
@@ -3770,7 +3858,7 @@
name.c_str());
profileUpdated |= (status == NO_ERROR);
}
- DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromType(
+ DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_IN_HDMI);
for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
@@ -3831,20 +3919,20 @@
}
}
-status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+status_t AudioPolicyManager::disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
{
- ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+ ALOGV("%s port Id %d", __FUNCTION__, sourceDesc->portId());
- sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->mPatchDesc->mHandle);
+ sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->patchDesc()->mHandle);
if (patchDesc == 0) {
ALOGW("%s source has no patch with handle %d", __FUNCTION__,
- sourceDesc->mPatchDesc->mHandle);
+ sourceDesc->patchDesc()->mHandle);
return BAD_VALUE;
}
- removeAudioPatch(sourceDesc->mPatchDesc->mHandle);
+ removeAudioPatch(sourceDesc->patchDesc()->mHandle);
- audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
- sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
+ audio_stream_type_t stream = sourceDesc->stream();
+ sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->swOutput().promote();
if (swOutputDesc != 0) {
status_t status = stopSource(swOutputDesc, stream, false);
if (status == NO_ERROR) {
@@ -3852,7 +3940,7 @@
}
mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
} else {
- sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->mHwOutput.promote();
+ sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->hwOutput().promote();
if (hwOutputDesc != 0) {
// release patch between src device and output device
// close Hwoutput and remove from mHwOutputs
@@ -3863,15 +3951,16 @@
return NO_ERROR;
}
-sp<AudioSourceDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
+sp<SourceClientDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
audio_io_handle_t output, routing_strategy strategy)
{
- sp<AudioSourceDescriptor> source;
+ sp<SourceClientDescriptor> source;
for (size_t i = 0; i < mAudioSources.size(); i++) {
- sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+ sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+ audio_attributes_t attributes = sourceDesc->attributes();
routing_strategy sourceStrategy =
- (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
- sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->mSwOutput.promote();
+ (routing_strategy) getStrategyForAttr(&attributes);
+ sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->swOutput().promote();
if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
source = sourceDesc;
break;
@@ -3914,6 +4003,7 @@
"%s/%s", kConfigLocationList[i], fileName);
ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
if (ret == NO_ERROR) {
+ config.setSource(audioPolicyXmlConfigFile);
return ret;
}
}
@@ -3925,7 +4015,7 @@
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
bool /*forTesting*/)
:
- mUidCached(getuid()),
+ mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running.
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
@@ -4036,7 +4126,8 @@
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
- const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
+ const DeviceVector &devicesForType = supportedDevices.getDevicesFromTypeMask(
+ profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
@@ -4090,7 +4181,7 @@
sp<AudioInputDescriptor> inputDesc =
new AudioInputDescriptor(inProfile, mpClientInterface);
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+ DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(profileType);
// the inputs vector must be of size >= 1, but we don't want to crash here
String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
: String8("");
@@ -4665,6 +4756,7 @@
nextAudioPortGeneration();
+ audio_devices_t device = inputDesc->mDevice;
ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
@@ -4675,6 +4767,12 @@
inputDesc->close();
mInputs.removeItem(input);
+
+ audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+ if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+ SoundTrigger::setCaptureState(false);
+ }
}
SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
@@ -4696,18 +4794,17 @@
return outputs;
}
-bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
- SortedVector<audio_io_handle_t>& outputs2)
+void AudioPolicyManager::checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked)
{
- if (outputs1.size() != outputs2.size()) {
- return false;
+ // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+ // output is suspended before any tracks are moved to it
+ checkA2dpSuspend();
+ checkOutputForAllStrategies();
+ if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
+ updateDevicesAndOutputs();
+ if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
+ setMsdPatch();
}
- for (size_t i = 0; i < outputs1.size(); i++) {
- if (outputs1[i] != outputs2[i]) {
- return false;
- }
- }
- return true;
}
void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
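
For illustration, a minimal standalone sketch of the optional std::function callback pattern that checkForDeviceAndOutputChanges introduces above (a simplified model with placeholder printouts, not the actual AudioPolicyManager code):

#include <cstdio>
#include <functional>

// Re-runs the A2DP suspend check only when the optional callback asks for it.
static void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr) {
    std::printf("checkA2dpSuspend + checkOutputForAllStrategies\n");
    if (onOutputsChecked != nullptr && onOutputsChecked()) {
        std::printf("callback requested another checkA2dpSuspend\n");
    }
    std::printf("updateDevicesAndOutputs\n");
}

int main() {
    checkForDeviceAndOutputChanges();                    // no callback, single check
    checkForDeviceAndOutputChanges([] { return true; }); // callback forces the re-check
    return 0;
}
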
@@ -4735,7 +4832,7 @@
}
}
- if (!vectorsEqual(srcOutputs,dstOutputs)) {
+ if (srcOutputs != dstOutputs) {
// get maximum latency of all source outputs to determine the minimum mute time guaranteeing
// audio from invalidated tracks will be rendered when unmuting
uint32_t maxLatency = 0;
@@ -4754,7 +4851,7 @@
setStrategyMute(strategy, true, desc);
setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
}
- sp<AudioSourceDescriptor> source =
+ sp<SourceClientDescriptor> source =
getSourceForStrategyOnOutput(srcOut, strategy);
if (source != 0){
connectAudioSource(source);
@@ -5259,54 +5356,20 @@
} else {
DeviceVector deviceList;
if ((address == NULL) || (strlen(address) == 0)) {
- deviceList = mAvailableOutputDevices.getDevicesFromType(device);
+ deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
} else {
- deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+ sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(
+ device, String8(address));
+ if (deviceDesc) deviceList.add(deviceDesc);
}
if (!deviceList.isEmpty()) {
- struct audio_patch patch;
- outputDesc->toAudioPortConfig(&patch.sources[0]);
- patch.num_sources = 1;
- patch.num_sinks = 0;
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(outputDesc);
for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
- deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
- patch.num_sinks++;
+ patchBuilder.addSink(deviceList.itemAt(i));
}
- ssize_t index;
- if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- index = mAudioPatches.indexOfKey(*patchHandle);
- } else {
- index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
- }
- sp< AudioPatch> patchDesc;
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- patchDesc = mAudioPatches.valueAt(index);
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&patch,
- &afPatchHandle,
- delayMs);
- ALOGV("setOutputDevice() createAudioPatch returned %d patchHandle %d"
- "num_sources %d num_sinks %d",
- status, afPatchHandle, patch.num_sources, patch.num_sinks);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&patch, mUidCached);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = patch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- if (patchHandle) {
- *patchHandle = patchDesc->mHandle;
- }
- outputDesc->setPatchHandle(patchDesc->mHandle);
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- }
+ installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
}
// inform all input as well
@@ -5364,53 +5427,21 @@
if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
inputDesc->mDevice = device;
- DeviceVector deviceList = mAvailableInputDevices.getDevicesFromType(device);
+ DeviceVector deviceList = mAvailableInputDevices.getDevicesFromTypeMask(device);
if (!deviceList.isEmpty()) {
- struct audio_patch patch;
- inputDesc->toAudioPortConfig(&patch.sinks[0]);
+ PatchBuilder patchBuilder;
+ patchBuilder.addSink(inputDesc,
// AUDIO_SOURCE_HOTWORD is for internal use only:
// handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
- if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
- !inputDesc->isSoundTrigger()) {
- patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
- }
- patch.num_sinks = 1;
+ [inputDesc](const PatchBuilder::mix_usecase_t& usecase) {
+ auto result = usecase;
+ if (result.source == AUDIO_SOURCE_HOTWORD && !inputDesc->isSoundTrigger()) {
+ result.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+ }
+ return result; }).
//only one input device for now
- deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
- patch.num_sources = 1;
- ssize_t index;
- if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- index = mAudioPatches.indexOfKey(*patchHandle);
- } else {
- index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
- }
- sp< AudioPatch> patchDesc;
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- patchDesc = mAudioPatches.valueAt(index);
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&patch,
- &afPatchHandle,
- 0);
- ALOGV("setInputDevice() createAudioPatch returned %d patchHandle %d",
- status, afPatchHandle);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&patch, mUidCached);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = patch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- if (patchHandle) {
- *patchHandle = patchDesc->mHandle;
- }
- inputDesc->setPatchHandle(patchDesc->mHandle);
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- }
+ addSource(deviceList.itemAt(0));
+ status = installPatch(__func__, patchHandle, inputDesc.get(), patchBuilder.patch(), 0);
}
}
return status;
@@ -5544,8 +5575,8 @@
return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
}
- // in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ // in-call: always cap volume by voice volume + some low headroom
+ if ((stream != AUDIO_STREAM_VOICE_CALL) &&
(isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
@@ -5557,9 +5588,9 @@
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
const float maxVoiceVolDb =
- computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5630,6 +5661,21 @@
return volumeDB;
}
+int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
+ audio_stream_type_t srcStream,
+ audio_stream_type_t dstStream)
+{
+ if (srcStream == dstStream) {
+ return srcIndex;
+ }
+ float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
+ float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
+ float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
+ float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+
+ return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
+}
+
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
int index,
const sp<AudioOutputDescriptor>& outputDesc,
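
As a worked example of the linear mapping in rescaleVolumeIndex above, here is a minimal standalone sketch; the [0,7] and [0,15] index ranges are made up for illustration, whereas the real code reads them from the per-stream volume curves:

#include <cstdio>

// Same linear rescaling as rescaleVolumeIndex, with the ranges passed in directly.
static int rescale(int srcIndex, float minSrc, float maxSrc, float minDst, float maxDst) {
    return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
}

int main() {
    // Index 5 of a [0,7] range maps to 10 of a [0,15] range (10.71 truncated).
    std::printf("%d\n", rescale(5, 0.f, 7.f, 0.f, 15.f));
    return 0;
}
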
@@ -5756,55 +5802,6 @@
}
}
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
- bool starting, bool stateChange)
-{
- if(!hasPrimaryOutput()) {
- return;
- }
-
- // if the stream pertains to sonification strategy and we are in call we must
- // mute the stream if it is low visibility. If it is high visibility, we must play a tone
- // in the device used for phone strategy and play the tone if the selected device does not
- // interfere with the device used for phone strategy
- // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
- // many times as there are active tracks on the output
- const routing_strategy stream_strategy = getStrategy(stream);
- if ((stream_strategy == STRATEGY_SONIFICATION) ||
- ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
- sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
- ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
- stream, starting, outputDesc->mDevice, stateChange);
- if (outputDesc->mRefCount[stream]) {
- int muteCount = 1;
- if (stateChange) {
- muteCount = outputDesc->mRefCount[stream];
- }
- if (audio_is_low_visibility(stream)) {
- ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- } else {
- ALOGV("handleIncallSonification() high visibility");
- if (outputDesc->device() &
- getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
- ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- }
- if (starting) {
- mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
- AUDIO_STREAM_VOICE_CALL);
- } else {
- mpClientInterface->stopTone();
- }
- }
- }
- }
-}
-
audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
{
// flags to stream type mapping
@@ -5818,39 +5815,7 @@
return AUDIO_STREAM_TTS;
}
- // usage to stream type mapping
- switch (attr->usage) {
- case AUDIO_USAGE_MEDIA:
- case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANT:
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- return AUDIO_STREAM_MUSIC;
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return AUDIO_STREAM_ACCESSIBILITY;
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return AUDIO_STREAM_SYSTEM;
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return AUDIO_STREAM_VOICE_CALL;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return AUDIO_STREAM_DTMF;
-
- case AUDIO_USAGE_ALARM:
- return AUDIO_STREAM_ALARM;
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return AUDIO_STREAM_RING;
-
- case AUDIO_USAGE_NOTIFICATION:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return AUDIO_STREAM_NOTIFICATION;
-
- case AUDIO_USAGE_UNKNOWN:
- default:
- return AUDIO_STREAM_MUSIC;
- }
+ return audio_usage_to_stream_type(attr->usage);
}
bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
@@ -5935,10 +5900,10 @@
void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
{
for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--) {
- sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
- if (sourceDesc->mDevice->equals(deviceDesc)) {
- ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->getHandle());
- stopAudioSource(sourceDesc->getHandle());
+ sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+ if (sourceDesc->srcDevice()->equals(deviceDesc)) {
+ ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->portId());
+ stopAudioSource(sourceDesc->portId());
}
}
@@ -6180,4 +6145,58 @@
}
}
+status_t AudioPolicyManager::installPatch(const char *caller,
+ audio_patch_handle_t *patchHandle,
+ AudioIODescriptorInterface *ioDescriptor,
+ const struct audio_patch *patch,
+ int delayMs)
+{
+ ssize_t index = mAudioPatches.indexOfKey(
+ patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE ?
+ *patchHandle : ioDescriptor->getPatchHandle());
+ sp<AudioPatch> patchDesc;
+ status_t status = installPatch(
+ caller, index, patchHandle, patch, delayMs, mUidCached, &patchDesc);
+ if (status == NO_ERROR) {
+ ioDescriptor->setPatchHandle(patchDesc->mHandle);
+ }
+ return status;
+}
+
+status_t AudioPolicyManager::installPatch(const char *caller,
+ ssize_t index,
+ audio_patch_handle_t *patchHandle,
+ const struct audio_patch *patch,
+ int delayMs,
+ uid_t uid,
+ sp<AudioPatch> *patchDescPtr)
+{
+ sp<AudioPatch> patchDesc;
+ audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ if (index >= 0) {
+ patchDesc = mAudioPatches.valueAt(index);
+ afPatchHandle = patchDesc->mAfPatchHandle;
+ }
+
+ status_t status = mpClientInterface->createAudioPatch(patch, &afPatchHandle, delayMs);
+ ALOGV("%s() AF::createAudioPatch returned %d patchHandle %d num_sources %d num_sinks %d",
+ caller, status, afPatchHandle, patch->num_sources, patch->num_sinks);
+ if (status == NO_ERROR) {
+ if (index < 0) {
+ patchDesc = new AudioPatch(patch, uid);
+ addAudioPatch(patchDesc->mHandle, patchDesc);
+ } else {
+ patchDesc->mPatch = *patch;
+ }
+ patchDesc->mAfPatchHandle = afPatchHandle;
+ if (patchHandle) {
+ *patchHandle = patchDesc->mHandle;
+ }
+ nextAudioPortGeneration();
+ mpClientInterface->onAudioPatchListUpdate();
+ }
+ if (patchDescPtr) *patchDescPtr = patchDesc;
+ return status;
+}
+
} // namespace android
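
The installPatch helpers above fold the previously duplicated createAudioPatch bookkeeping (handle lookup, AudioPatch creation or update, port generation bump, list update callback) into one place, and PatchBuilder replaces the manual filling of audio_patch source/sink arrays. A simplified model of that builder pattern is sketched below; the types and method names are illustrative stand-ins, not the real media/PatchBuilder.h API:

#include <cstdio>
#include <string>
#include <vector>

struct Patch {
    std::vector<std::string> sources;
    std::vector<std::string> sinks;
};

// Chainable builder that owns the patch it assembles, mirroring addSource()/addSink()/patch().
class PatchBuilder {
public:
    PatchBuilder& addSource(const std::string& port) { mPatch.sources.push_back(port); return *this; }
    PatchBuilder& addSink(const std::string& port)   { mPatch.sinks.push_back(port);   return *this; }
    const Patch* patch() const { return &mPatch; }
private:
    Patch mPatch;
};

int main() {
    PatchBuilder builder;
    builder.addSource("mix:primary-output").addSink("device:speaker");
    const Patch* p = builder.patch();
    std::printf("num_sources %zu num_sinks %zu\n", p->sources.size(), p->sinks.size());
    return 0;
}
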
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 48e0472..9436767 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -17,6 +17,7 @@
#pragma once
#include <atomic>
+#include <functional>
#include <memory>
#include <unordered_set>
@@ -30,6 +31,7 @@
#include <utils/SortedVector.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
+#include <media/PatchBuilder.h>
#include "AudioPolicyInterface.h"
#include <AudioPolicyManagerInterface.h>
@@ -38,6 +40,7 @@
#include <AudioPolicyConfig.h>
#include <AudioPort.h>
#include <AudioPatch.h>
+#include <AudioProfile.h>
#include <DeviceDescriptor.h>
#include <IOProfile.h>
#include <HwModule.h>
@@ -118,15 +121,9 @@
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ virtual status_t startOutput(audio_port_handle_t portId);
+ virtual status_t stopOutput(audio_port_handle_t portId);
+ virtual void releaseOutput(audio_port_handle_t portId);
virtual status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
@@ -138,16 +135,13 @@
audio_port_handle_t *portId);
// indicates to the audio policy manager that the input starts being used.
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session,
+ virtual status_t startInput(audio_port_handle_t portId,
bool silenced,
concurrency_type__mask_t *concurrency);
// indicates to the audio policy manager that the input stops being used.
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session);
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session);
+ virtual status_t stopInput(audio_port_handle_t portId);
+ virtual void releaseInput(audio_port_handle_t portId);
virtual void closeAllInputs();
virtual void initStreamVolume(audio_stream_type_t stream,
int indexMin,
@@ -229,9 +223,9 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle,
+ audio_port_handle_t *portId,
uid_t uid);
- virtual status_t stopAudioSource(audio_patch_handle_t handle);
+ virtual status_t stopAudioSource(audio_port_handle_t portId);
virtual status_t setMasterMono(bool mono);
virtual status_t getMasterMono(bool *mono);
@@ -353,6 +347,10 @@
int index,
audio_devices_t device);
+ // rescale volume index from srcStream within range of dstStream
+ int rescaleVolumeIndex(int srcIndex,
+ audio_stream_type_t srcStream,
+ audio_stream_type_t dstStream);
// check that volume change is permitted, compute and send new volume to audio hardware
virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
const sp<AudioOutputDescriptor>& outputDesc,
@@ -377,10 +375,6 @@
int delayMs = 0,
audio_devices_t device = (audio_devices_t)0);
- // handle special cases for sonification strategy while in call: mute streams or replace by
- // a special tone in the device used for communication
- void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
audio_mode_t getPhoneState();
// true if device is in a telephony or VoIP call
@@ -410,6 +404,12 @@
// close an input.
void closeInput(audio_io_handle_t input);
+ // runs all the checks required for accommodating changes in devices and outputs
+ // if 'onOutputsChecked' callback is provided, it is executed after the outputs
+ // check via 'checkOutputForAllStrategies'. If the callback returns 'true',
+ // A2DP suspend status is rechecked.
+ void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr);
+
// checks and if necessary changes outputs used for all strategies.
// must be called every time a condition that affects the output choice for a given strategy
// changes: connected device, phone state, force use...
@@ -451,8 +451,6 @@
SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
const SwAudioOutputCollection& openOutputs);
- bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
- SortedVector<audio_io_handle_t>& outputs2);
// mute/unmute strategies using an incompatible device combination
// if muting, wait for the audio in pcm buffer to be drained before proceeding
@@ -501,13 +499,14 @@
if (!hasPrimaryOutput()) {
return AUDIO_DEVICE_NONE;
}
- return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle());
+ return mAvailableInputDevices.getDeviceTypesFromHwModule(
+ mPrimaryOutput->getModuleHandle());
}
uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
- sp<DeviceDescriptor> fillAudioPortConfigForDevice(
- const DeviceVector& devices, audio_devices_t device, audio_port_config *config);
+ sp<DeviceDescriptor> findDevice(
+ const DeviceVector& devices, audio_devices_t device) const;
// if argument "device" is different from AUDIO_DEVICE_NONE, startSource() will force
// the re-evaluation of the output device.
@@ -526,10 +525,10 @@
status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
- status_t connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
- status_t disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
+ status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
+ status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
- sp<AudioSourceDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
+ sp<SourceClientDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
routing_strategy strategy);
void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
@@ -544,7 +543,11 @@
static bool streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2);
- uid_t mUidCached;
+ void closeSessions(const sp<AudioInputDescriptor>& input, bool activeOnly);
+ void closeSession(const sp<AudioInputDescriptor>& input,
+ const sp<AudioSession>& session);
+
+ const uid_t mUidCached; // AID_AUDIOSERVER
AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
// list of descriptors for outputs currently opened
@@ -584,7 +587,7 @@
sp<AudioPatch> mCallRxPatch;
HwAudioOutputCollection mHwOutputs;
- AudioSourceCollection mAudioSources;
+ SourceClientCollection mAudioSources;
// for supporting "beacon" streams, i.e. streams that only play on speaker, and never
// when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
@@ -617,6 +620,17 @@
status_t getSupportedFormats(audio_io_handle_t ioHandle, FormatVector& formats);
+ // Support for Multi-Stream Decoder (MSD) module
+ sp<DeviceDescriptor> getMsdAudioInDevice() const;
+ audio_devices_t getMsdAudioOutDeviceTypes() const;
+ const AudioPatchCollection getMsdPatches() const;
+ status_t getBestMsdAudioProfileFor(audio_devices_t outputDevice,
+ bool hwAvSync,
+ audio_port_config *sourceConfig,
+ audio_port_config *sinkConfig) const;
+ PatchBuilder buildMsdPatch(audio_devices_t outputDevice) const;
+ status_t setMsdPatch(audio_devices_t outputDevice = AUDIO_DEVICE_NONE);
+
// If any, resolve any "dynamic" fields of an Audio Profiles collection
void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
AudioProfileVector &profiles);
@@ -684,6 +698,18 @@
param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
mpClientInterface->setParameters(output, param.toString());
}
+ status_t installPatch(const char *caller,
+ audio_patch_handle_t *patchHandle,
+ AudioIODescriptorInterface *ioDescriptor,
+ const struct audio_patch *patch,
+ int delayMs);
+ status_t installPatch(const char *caller,
+ ssize_t index,
+ audio_patch_handle_t *patchHandle,
+ const struct audio_patch *patch,
+ int delayMs,
+ uid_t uid,
+ sp<AudioPatch> *patchDescPtr);
bool soundTriggerSupportsConcurrentCapture();
bool mSoundTriggerSupportsConcurrentCapture;
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
return result;
}
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
- return mAudioPolicyService->stopTone();
-}
-
status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
{
return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index c7dfe0f..2858aad 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,7 +23,9 @@
#include <memory>
#include <cutils/misc.h>
#include <media/AudioEffect.h>
+#include <media/AudioPolicyHelper.h>
#include <media/EffectsConfig.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
#include <system/audio_effects/audio_effects_conf.h>
#include <utils/Vector.h>
@@ -31,7 +33,6 @@
#include <cutils/config_utils.h>
#include <binder/IPCThreadState.h>
#include "AudioPolicyEffects.h"
-#include "ServiceUtilities.h"
namespace android {
@@ -317,6 +318,102 @@
return status;
}
+status_t AudioPolicyEffects::addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id)
+{
+ if (uuid == NULL || type == NULL) {
+ ALOGE("addStreamDefaultEffect(): Null uuid or type uuid pointer");
+ return BAD_VALUE;
+ }
+
+ audio_stream_type_t stream = audio_usage_to_stream_type(usage);
+
+ if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+ ALOGE("addStreamDefaultEffect(): Unsupported stream type %d", stream);
+ return BAD_VALUE;
+ }
+
+ // Check that |uuid| or |type| corresponds to an effect on the system.
+ effect_descriptor_t descriptor = {};
+ status_t res = AudioEffect::getEffectDescriptor(
+ uuid, type, EFFECT_FLAG_TYPE_INSERT, &descriptor);
+ if (res != OK) {
+ ALOGE("addStreamDefaultEffect(): Failed to find effect descriptor matching uuid/type.");
+ return res;
+ }
+
+ // Only insert effects can be added dynamically as stream defaults.
+ if ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_INSERT) {
+ ALOGE("addStreamDefaultEffect(): Desired effect cannot be attached "
+ "as a stream default effect.");
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // Find the EffectDescVector for the given stream type, or create a new one if necessary.
+ ssize_t index = mOutputStreams.indexOfKey(stream);
+ EffectDescVector *desc = NULL;
+ if (index < 0) {
+ // No effects for this stream type yet.
+ desc = new EffectDescVector();
+ mOutputStreams.add(stream, desc);
+ } else {
+ desc = mOutputStreams.valueAt(index);
+ }
+
+ // Create a new effect and add it to the vector.
+ res = AudioEffect::newEffectUniqueId(id);
+ if (res != OK) {
+ ALOGE("addStreamDefaultEffect(): failed to get new unique id.");
+ return res;
+ }
+ EffectDesc *effect = new EffectDesc(
+ descriptor.name, *type, opPackageName, *uuid, priority, *id);
+ desc->mEffects.add(effect);
+ // TODO(b/71813697): Support setting params as well.
+
+ // TODO(b/71814300): Retroactively attach to any existing streams of the given type.
+ // This requires tracking the stream type of each session id in addition to what is
+ // already being tracked.
+
+ return NO_ERROR;
+}
+
+status_t AudioPolicyEffects::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+ if (id == AUDIO_UNIQUE_ID_ALLOCATE) {
+ // ALLOCATE is not a unique identifier, but rather a reserved value indicating
+ // a real id has not been assigned. For default effects, this value is only used
+ // by system-owned defaults from the loaded config, which cannot be removed.
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // Check each stream type.
+ size_t numStreams = mOutputStreams.size();
+ for (size_t i = 0; i < numStreams; ++i) {
+ // Check each effect for each stream.
+ EffectDescVector* descVector = mOutputStreams[i];
+ for (auto desc = descVector->mEffects.begin(); desc != descVector->mEffects.end(); ++desc) {
+ if ((*desc)->mId == id) {
+ // Found it!
+ // TODO(b/71814300): Remove from any streams the effect was attached to.
+ descVector->mEffects.erase(desc);
+ // Handles are unique; there can only be one match, so return early.
+ return NO_ERROR;
+ }
+ }
+ }
+
+ // Effect wasn't found, so it's been trivially removed successfully.
+ return NO_ERROR;
+}
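
addStreamDefaultEffect and removeStreamDefaultEffect above rely on two simple container patterns: find-or-create the per-stream descriptor vector, and erase-by-id with an early return once the unique id is found. A standalone sketch of both, using std::map and std::vector as stand-ins for the KeyedVector/Vector types of the real code:

#include <cstdio>
#include <map>
#include <vector>

int main() {
    std::map<int, std::vector<int>> effectsPerStream; // stream type -> effect ids

    auto add = [&](int stream, int id) {
        effectsPerStream[stream].push_back(id); // creates the vector on first use
    };
    auto removeById = [&](int id) {
        for (auto& entry : effectsPerStream) {
            auto& effects = entry.second;
            for (auto it = effects.begin(); it != effects.end(); ++it) {
                if (*it == id) { effects.erase(it); return; } // ids are unique, stop here
            }
        }
        // not found: treated as trivially removed, like the real code
    };

    add(/*stream=*/3, /*id=*/42);
    removeById(42);
    std::printf("effects left for stream 3: %zu\n", effectsPerStream[3].size()); // prints 0
    return 0;
}
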
void AudioPolicyEffects::EffectVector::setProcessorEnabled(bool enabled)
{
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 623180e..69367b1 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -64,7 +64,6 @@
status_t releaseInputEffects(audio_io_handle_t input,
audio_session_t audioSession);
-
// Return a list of effect descriptors for default output effects
// associated with audioSession
status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
@@ -82,18 +81,49 @@
audio_stream_type_t stream,
audio_session_t audioSession);
+ // Add the effect to the list of default effects for streams of the type derived from |usage|.
+ status_t addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id);
+
+ // Remove the default stream effect from wherever it's attached.
+ status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
private:
// class to store the description of an effects and its parameters
// as defined in audio_effects.conf
class EffectDesc {
public:
- EffectDesc(const char *name, const effect_uuid_t& uuid) :
+ EffectDesc(const char *name,
+ const effect_uuid_t& typeUuid,
+ const String16& opPackageName,
+ const effect_uuid_t& uuid,
+ uint32_t priority,
+ audio_unique_id_t id) :
mName(strdup(name)),
- mUuid(uuid) { }
+ mTypeUuid(typeUuid),
+ mOpPackageName(opPackageName),
+ mUuid(uuid),
+ mPriority(priority),
+ mId(id) { }
+ EffectDesc(const char *name, const effect_uuid_t& uuid) :
+ EffectDesc(name,
+ *EFFECT_UUID_NULL,
+ String16(""),
+ uuid,
+ 0,
+ AUDIO_UNIQUE_ID_ALLOCATE) { }
EffectDesc(const EffectDesc& orig) :
mName(strdup(orig.mName)),
- mUuid(orig.mUuid) {
+ mTypeUuid(orig.mTypeUuid),
+ mOpPackageName(orig.mOpPackageName),
+ mUuid(orig.mUuid),
+ mPriority(orig.mPriority),
+ mId(orig.mId) {
// deep copy mParams
for (size_t k = 0; k < orig.mParams.size(); k++) {
effect_param_t *origParam = orig.mParams[k];
@@ -116,7 +146,11 @@
}
}
char *mName;
+ effect_uuid_t mTypeUuid;
+ String16 mOpPackageName;
effect_uuid_t mUuid;
+ int32_t mPriority;
+ audio_unique_id_t mId;
Vector <effect_param_t *> mParams;
};
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 7337f04..3439c9b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -17,12 +17,12 @@
#define LOG_TAG "AudioPolicyIntefaceImpl"
//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-#include <media/MediaAnalyticsItem.h>
-
#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
#include "TypeConverter.h"
+#include <media/AudioPolicyHelper.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Log.h>
namespace android {
@@ -183,7 +183,7 @@
Mutex::Autolock _l(mLock);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || uid == (uid_t)-1) {
+ if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) {
ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
@@ -200,7 +200,7 @@
!modifyPhoneStateAllowed(pid, uid)) {
// If the app tries to play music through the telephony device and doesn't have permission
// the fallback to the default output device.
- mAudioPolicyManager->releaseOutput(*output, *stream, session);
+ mAudioPolicyManager->releaseOutput(*portId);
flags = originalFlags;
*selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
*portId = AUDIO_PORT_HANDLE_NONE;
@@ -208,93 +208,116 @@
config,
&flags, selectedDeviceId, portId);
}
+
+ if (result == NO_ERROR) {
+ sp <AudioPlaybackClient> client =
+ new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+ mAudioPlaybackClients.add(*portId, client);
+ }
return result;
}
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::startOutput(audio_port_handle_t portId)
{
- if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- return BAD_VALUE;
- }
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
ALOGV("startOutput()");
+ sp<AudioPlaybackClient> client;
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return INVALID_OPERATION;
+ }
+ client = mAudioPlaybackClients.valueAt(index);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
// create audio processors according to stream
- status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
+ status_t status = audioPolicyEffects->addOutputSessionEffects(
+ client->io, client->stream, client->session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on session %d", session);
+ ALOGW("Failed to add effects on session %d", client->session);
}
}
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return mAudioPolicyManager->startOutput(output, stream, session);
+ status_t status = mAudioPolicyManager->startOutput(portId);
+ if (status == NO_ERROR) {
+ client->active = true;
+ }
+ return status;
}
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::stopOutput(audio_port_handle_t portId)
{
- if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- return BAD_VALUE;
- }
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
ALOGV("stopOutput()");
- mOutputCommandThread->stopOutputCommand(output, stream, session);
+ mOutputCommandThread->stopOutputCommand(portId);
return NO_ERROR;
}
-status_t AudioPolicyService::doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::doStopOutput(audio_port_handle_t portId)
{
- ALOGV("doStopOutput from tid %d", gettid());
+ ALOGV("doStopOutput");
+ sp<AudioPlaybackClient> client;
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
+
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return INVALID_OPERATION;
+ }
+ client = mAudioPlaybackClients.valueAt(index);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
// release audio processors from the stream
- status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
+ status_t status = audioPolicyEffects->releaseOutputSessionEffects(
+ client->io, client->stream, client->session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to release effects on session %d", session);
+ ALOGW("Failed to release effects on session %d", client->session);
}
}
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return mAudioPolicyManager->stopOutput(output, stream, session);
+ status_t status = mAudioPolicyManager->stopOutput(portId);
+ if (status == NO_ERROR) {
+ client->active = false;
+ }
+ return status;
}
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::releaseOutput(audio_port_handle_t portId)
{
if (mAudioPolicyManager == NULL) {
return;
}
ALOGV("releaseOutput()");
- mOutputCommandThread->releaseOutputCommand(output, stream, session);
+ mOutputCommandThread->releaseOutputCommand(portId);
}
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::doReleaseOutput(audio_port_handle_t portId)
{
ALOGV("doReleaseOutput from tid %d", gettid());
Mutex::Autolock _l(mLock);
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return;
+ }
+ sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
+ mAudioPlaybackClients.removeItem(portId);
+
// called from internal thread: no need to clear caller identity
- mAudioPolicyManager->releaseOutput(output, stream, session);
+ mAudioPolicyManager->releaseOutput(portId);
}
status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
@@ -320,7 +343,7 @@
bool updatePid = (pid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
@@ -398,17 +421,13 @@
if (status != NO_ERROR) {
if (status == PERMISSION_DENIED) {
AutoCallerClear acc;
- mAudioPolicyManager->releaseInput(*input, session);
+ mAudioPolicyManager->releaseInput(*portId);
}
return status;
}
- sp<AudioRecordClient> client =
- new AudioRecordClient(*attr, *input, uid, pid, opPackageName, session);
- client->active = false;
- client->isConcurrent = false;
- client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
- client->deviceId = *selectedDeviceId;
+ sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+ *selectedDeviceId, opPackageName);
mAudioRecordClients.add(*portId, client);
}
@@ -496,8 +515,7 @@
status_t status;
{
AutoCallerClear acc;
- status = mAudioPolicyManager->startInput(
- client->input, client->session, *silenced, &concurrency);
+ status = mAudioPolicyManager->startInput(portId, *silenced, &concurrency);
}
@@ -528,10 +546,13 @@
item->setCString(kAudioPolicyRqstSrc,
audioSourceString(client->attributes.source).c_str());
- item->setCString(kAudioPolicyRqstPkg,
- std::string(String8(client->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
-
+ if (client->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyRqstPkg,
+ std::string(String8(client->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyRqstPkg, std::to_string(client->uid).c_str());
+ }
item->setCString(
kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
@@ -550,9 +571,14 @@
// keeps the last of the clients marked active
item->setCString(kAudioPolicyActiveSrc,
audioSourceString(other->attributes.source).c_str());
- item->setCString(kAudioPolicyActivePkg,
- std::string(String8(other->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
+ if (other->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyActivePkg,
+ std::string(String8(other->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyActivePkg,
+ std::to_string(other->uid).c_str());
+ }
item->setCString(kAudioPolicyActiveDevice,
getDeviceTypeStrForPortId(other->deviceId).c_str());
}
@@ -602,7 +628,7 @@
// finish the recording app op
finishRecording(client->opPackageName, client->uid);
AutoCallerClear acc;
- return mAudioPolicyManager->stopInput(client->input, client->session);
+ return mAudioPolicyManager->stopInput(portId);
}
void AudioPolicyService::releaseInput(audio_port_handle_t portId)
@@ -627,15 +653,15 @@
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(client->input, client->session);
+ status_t status = audioPolicyEffects->releaseInputEffects(client->io, client->session);
if(status != NO_ERROR) {
- ALOGW("Failed to release effects on input %d", client->input);
+ ALOGW("Failed to release effects on input %d", client->io);
}
}
{
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- mAudioPolicyManager->releaseInput(client->input, client->session);
+ mAudioPolicyManager->releaseInput(portId);
}
}
@@ -824,6 +850,50 @@
(audio_session_t)audioSession, descriptors, count);
}
+status_t AudioPolicyService::addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!modifyDefaultAudioEffectsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ sp<AudioPolicyEffects>audioPolicyEffects;
+ {
+ Mutex::Autolock _l(mLock);
+ audioPolicyEffects = mAudioPolicyEffects;
+ }
+ if (audioPolicyEffects == 0) {
+ return NO_INIT;
+ }
+ return audioPolicyEffects->addStreamDefaultEffect(
+ type, opPackageName, uuid, priority, usage, id);
+}
+
+status_t AudioPolicyService::removeStreamDefaultEffect(audio_unique_id_t id)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!modifyDefaultAudioEffectsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ sp<AudioPolicyEffects>audioPolicyEffects;
+ {
+ Mutex::Autolock _l(mLock);
+ audioPolicyEffects = mAudioPolicyEffects;
+ }
+ if (audioPolicyEffects == 0) {
+ return NO_INIT;
+ }
+ return audioPolicyEffects->removeStreamDefaultEffect(id);
+}
+
bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
{
if (mAudioPolicyManager == NULL) {
@@ -955,26 +1025,26 @@
}
status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_patch_handle_t *handle)
+ const audio_attributes_t *attributes,
+ audio_port_handle_t *portId)
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
AutoCallerClear acc;
- return mAudioPolicyManager->startAudioSource(source, attributes, handle,
+ return mAudioPolicyManager->startAudioSource(source, attributes, portId,
IPCThreadState::self()->getCallingUid());
}
-status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
+status_t AudioPolicyService::stopAudioSource(audio_port_handle_t portId)
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
AutoCallerClear acc;
- return mAudioPolicyManager->stopAudioSource(handle);
+ return mAudioPolicyManager->stopAudioSource(portId);
}
status_t AudioPolicyService::setMasterMono(bool mono)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index f3cddc3..8bca221 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -26,7 +26,6 @@
#include <sys/time.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
-#include <cutils/multiuser.h>
#include <cutils/properties.h>
#include <binder/IPCThreadState.h>
#include <binder/ActivityManager.h>
@@ -35,16 +34,14 @@
#include <utils/String16.h>
#include <utils/threads.h>
#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
#include <hardware_legacy/power.h>
#include <media/AudioEffect.h>
#include <media/AudioParameter.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <private/android_filesystem_config.h>
-
namespace android {
static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
@@ -70,8 +67,6 @@
{
Mutex::Autolock _l(mLock);
- // start tone playback thread
- mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
@@ -93,7 +88,6 @@
AudioPolicyService::~AudioPolicyService()
{
- mTonePlaybackThread->exit();
mAudioCommandThread->exit();
mOutputCommandThread->exit();
@@ -118,13 +112,17 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
- uid);
- ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+ uid,
+ pid);
+ ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
- mNotificationClients.add(uid, notificationClient);
+ mNotificationClients.add(token, notificationClient);
sp<IBinder> binder = IInterface::asBinder(client);
binder->linkToDeath(notificationClient);
@@ -136,22 +134,33 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
return;
}
- mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+ mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
}
// removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
{
{
Mutex::Autolock _l(mNotificationClientsLock);
- mNotificationClients.removeItem(uid);
+ int64_t token = ((int64_t)uid<<32) | pid;
+ mNotificationClients.removeItem(token);
}
{
Mutex::Autolock _l(mLock);
- if (mAudioPolicyManager) {
+ bool hasSameUid = false;
+ for (size_t i = 0; i < mNotificationClients.size(); i++) {
+ if (mNotificationClients.valueAt(i)->uid() == uid) {
+ hasSameUid = true;
+ break;
+ }
+ }
+ if (mAudioPolicyManager && !hasSameUid) {
// called from binder death notification: no need to clear caller identity
mAudioPolicyManager->releaseResourcesForUid(uid);
}
@@ -239,8 +248,9 @@
AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid)
- : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+ uid_t uid,
+ pid_t pid)
+ : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
mAudioPortCallbacksEnabled(false)
{
}
@@ -254,7 +264,7 @@
sp<NotificationClient> keep(this);
sp<AudioPolicyService> service = mService.promote();
if (service != 0) {
- service->removeNotificationClient(mUid);
+ service->removeNotificationClient(mUid, mPid);
}
}
@@ -275,7 +285,7 @@
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
const String8& regId, int32_t state)
{
- if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
}
}
@@ -285,7 +295,7 @@
const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
audio_patch_handle_t patchHandle)
{
- if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
clientConfig, deviceConfig, patchHandle);
}
@@ -325,8 +335,6 @@
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
- snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
- result.append(buffer);
write(fd, result.string(), result.size());
return NO_ERROR;
@@ -362,9 +370,6 @@
if (mAudioCommandThread != 0) {
mAudioCommandThread->dump(fd);
}
- if (mTonePlaybackThread != 0) {
- mTonePlaybackThread->dump(fd);
- }
if (mAudioPolicyManager) {
mAudioPolicyManager->dump(fd);
@@ -577,10 +582,6 @@
updateUidCache(uid, false, true);
}
-bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
- return multiuser_get_app_id(uid) < AID_APP_START;
-}
-
void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
sp<AudioPolicyService> service = mService.promote();
if (service != nullptr) {
@@ -639,7 +640,6 @@
const wp<AudioPolicyService>& service)
: Thread(false), mName(name), mService(service)
{
- mpToneGenerator = NULL;
}
@@ -649,7 +649,6 @@
release_wake_lock(mName.string());
}
mAudioCommands.clear();
- delete mpToneGenerator;
}
void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -674,26 +673,6 @@
mLastCommand = command;
switch (command->mCommand) {
- case START_TONE: {
- mLock.unlock();
- ToneData *data = (ToneData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing start tone %d on stream %d",
- data->mType, data->mStream);
- delete mpToneGenerator;
- mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
- mpToneGenerator->startTone(data->mType);
- mLock.lock();
- }break;
- case STOP_TONE: {
- mLock.unlock();
- ALOGV("AudioCommandThread() processing stop tone");
- if (mpToneGenerator != NULL) {
- mpToneGenerator->stopTone();
- delete mpToneGenerator;
- mpToneGenerator = NULL;
- }
- mLock.lock();
- }break;
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam.get();
ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -716,26 +695,26 @@
}break;
case STOP_OUTPUT: {
StopOutputData *data = (StopOutputData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing stop output %d",
- data->mIO);
+ ALOGV("AudioCommandThread() processing stop output portId %d",
+ data->mPortId);
svc = mService.promote();
if (svc == 0) {
break;
}
mLock.unlock();
- svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+ svc->doStopOutput(data->mPortId);
mLock.lock();
}break;
case RELEASE_OUTPUT: {
ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing release output %d",
- data->mIO);
+ ALOGV("AudioCommandThread() processing release output portId %d",
+ data->mPortId);
svc = mService.promote();
if (svc == 0) {
break;
}
mLock.unlock();
- svc->doReleaseOutput(data->mIO, data->mStream, data->mSession);
+ svc->doReleaseOutput(data->mPortId);
mLock.lock();
}break;
case CREATE_AUDIO_PATCH: {
@@ -900,27 +879,6 @@
return NO_ERROR;
}
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream)
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = START_TONE;
- sp<ToneData> data = new ToneData();
- data->mType = type;
- data->mStream = stream;
- command->mParam = data;
- ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
- sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = STOP_TONE;
- ALOGV("AudioCommandThread() adding tone stop");
- sendCommand(command);
-}
-
status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
float volume,
audio_io_handle_t output,
@@ -967,33 +925,25 @@
return sendCommand(command, delayMs);
}
-void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_port_handle_t portId)
{
sp<AudioCommand> command = new AudioCommand();
command->mCommand = STOP_OUTPUT;
sp<StopOutputData> data = new StopOutputData();
- data->mIO = output;
- data->mStream = stream;
- data->mSession = session;
+ data->mPortId = portId;
command->mParam = data;
- ALOGV("AudioCommandThread() adding stop output %d", output);
+ ALOGV("AudioCommandThread() adding stop output portId %d", portId);
sendCommand(command);
}
-void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_port_handle_t portId)
{
sp<AudioCommand> command = new AudioCommand();
command->mCommand = RELEASE_OUTPUT;
sp<ReleaseOutputData> data = new ReleaseOutputData();
- data->mIO = output;
- data->mStream = stream;
- data->mSession = session;
+ data->mPortId = portId;
command->mParam = data;
- ALOGV("AudioCommandThread() adding release output %d", output);
+ ALOGV("AudioCommandThread() adding release output portId %d", portId);
sendCommand(command);
}
@@ -1212,6 +1162,7 @@
patch = ((CreateAudioPatchData *)command->mParam.get())->mPatch;
} else {
handle = ((ReleaseAudioPatchData *)command->mParam.get())->mHandle;
+ memset(&patch, 0, sizeof(patch));
}
audio_patch_handle_t handle2;
struct audio_patch patch2;
@@ -1256,8 +1207,6 @@
} break;
- case START_TONE:
- case STOP_TONE:
default:
break;
}
@@ -1330,27 +1279,6 @@
output, delayMs);
}
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
- ALOGE("startTone: illegal tone requested (%d)", tone);
- }
- if (stream != AUDIO_STREAM_VOICE_CALL) {
- ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
- tone);
- }
- mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
- AUDIO_STREAM_VOICE_CALL);
- return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
- mTonePlaybackThread->stopToneCommand();
- return 0;
-}
-
int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
{
return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1406,9 +1334,6 @@
int aps_set_stream_volume(void *service, audio_stream_type_t stream,
float volume, audio_io_handle_t output,
int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
- audio_stream_type_t stream);
-int aps_stop_tone(void *service);
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
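The notification-client bookkeeping above now keys each client on both its uid and its pid, packed into a single 64-bit map key. A minimal sketch of that packing and of how the two halves can be recovered; the helper names are illustrative and are not part of AudioPolicyService:

    #include <cstdint>
    #include <sys/types.h>

    // Pack a client's uid and pid into the 64-bit key used for mNotificationClients:
    // uid in the high 32 bits, pid in the low 32 bits.
    static inline int64_t packNotificationClientKey(uid_t uid, pid_t pid) {
        return ((int64_t)uid << 32) | pid;
    }
    static inline uid_t keyToUid(int64_t key) { return (uid_t)(key >> 32); }
    static inline pid_t keyToPid(int64_t key) { return (pid_t)(key & 0xffffffff); }

With this scheme two processes running under the same uid get distinct entries, which is why removeNotificationClient() now checks whether any other client with the same uid is still registered before releasing that uid's resources.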
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 407d7a5..44c0347 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -38,8 +38,6 @@
namespace android {
-using namespace std;
-
// ----------------------------------------------------------------------------
class AudioPolicyService :
@@ -83,15 +81,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ virtual status_t startOutput(audio_port_handle_t portId);
+ virtual status_t stopOutput(audio_port_handle_t portId);
+ virtual void releaseOutput(audio_port_handle_t portId);
virtual status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
@@ -134,6 +126,14 @@
virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count);
+ virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid,
+ int32_t priority,
+ audio_usage_t usage,
+ audio_unique_id_t* id);
+ virtual status_t removeStreamDefaultEffect(audio_unique_id_t id);
+
virtual status_t onTransact(
uint32_t code,
const Parcel& data,
@@ -159,8 +159,6 @@
float volume,
audio_io_handle_t output,
int delayMs = 0);
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
virtual bool isOffloadSupported(const audio_offload_info_t &config);
@@ -194,8 +192,8 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_patch_handle_t *handle);
- virtual status_t stopAudioSource(audio_patch_handle_t handle);
+ audio_port_handle_t *portId);
+ virtual status_t stopAudioSource(audio_port_handle_t portId);
virtual status_t setMasterMono(bool mono);
virtual status_t getMasterMono(bool *mono);
@@ -209,12 +207,8 @@
bool reported);
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
- status_t doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- void doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ status_t doStopOutput(audio_port_handle_t portId);
+ void doReleaseOutput(audio_port_handle_t portId);
status_t clientCreateAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
@@ -224,7 +218,7 @@
virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
int delayMs);
- void removeNotificationClient(uid_t uid);
+ void removeNotificationClient(uid_t uid, pid_t pid);
void onAudioPortListUpdate();
void doOnAudioPortListUpdate();
void onAudioPatchListUpdate();
@@ -293,7 +287,6 @@
void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
private:
- bool isServiceUid(uid_t uid) const;
void notifyService(uid_t uid, bool active);
void updateOverrideUid(uid_t uid, bool active, bool insert);
void updateUidCache(uid_t uid, bool active, bool insert);
@@ -307,10 +300,7 @@
std::unordered_map<uid_t, bool> mCachedUids;
};
- // Thread used for tone playback and to send audio config commands to audio flinger
- // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
- // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
- // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+ // Thread used to send audio config commands to audio flinger
// For audio config commands, it is necessary because audio flinger requires that the calling
// process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {
@@ -319,8 +309,6 @@
// commands for tone AudioCommand
enum {
- START_TONE,
- STOP_TONE,
SET_VOLUME,
SET_PARAMETERS,
SET_VOICE_VOLUME,
@@ -345,20 +333,13 @@
virtual bool threadLoop();
void exit();
- void startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream);
- void stopToneCommand();
status_t volumeCommand(audio_stream_type_t stream, float volume,
audio_io_handle_t output, int delayMs = 0);
status_t parametersCommand(audio_io_handle_t ioHandle,
const char *keyValuePairs, int delayMs = 0);
status_t voiceVolumeCommand(float volume, int delayMs = 0);
- void stopOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- void releaseOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ void stopOutputCommand(audio_port_handle_t portId);
+ void releaseOutputCommand(audio_port_handle_t portId);
status_t sendCommand(sp<AudioCommand>& command, int delayMs = 0);
void insertCommand_l(sp<AudioCommand>& command, int delayMs = 0);
status_t createAudioPatchCommand(const struct audio_patch *patch,
@@ -390,7 +371,7 @@
void dump(char* buffer, size_t size);
- int mCommand; // START_TONE, STOP_TONE ...
+ int mCommand; // SET_VOLUME, SET_PARAMETERS...
nsecs_t mTime; // time stamp
Mutex mLock; // mutex associated to mCond
Condition mCond; // condition for status return
@@ -406,12 +387,6 @@
AudioCommandData() {}
};
- class ToneData : public AudioCommandData {
- public:
- ToneGenerator::tone_type mType; // tone type (START_TONE only)
- audio_stream_type_t mStream; // stream type (START_TONE only)
- };
-
class VolumeData : public AudioCommandData {
public:
audio_stream_type_t mStream;
@@ -432,16 +407,12 @@
class StopOutputData : public AudioCommandData {
public:
- audio_io_handle_t mIO;
- audio_stream_type_t mStream;
- audio_session_t mSession;
+ audio_port_handle_t mPortId;
};
class ReleaseOutputData : public AudioCommandData {
public:
- audio_io_handle_t mIO;
- audio_stream_type_t mStream;
- audio_session_t mSession;
+ audio_port_handle_t mPortId;
};
class CreateAudioPatchData : public AudioCommandData {
@@ -478,7 +449,6 @@
Mutex mLock;
Condition mWaitWorkCV;
Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
- ToneGenerator *mpToneGenerator; // the tone generator
sp<AudioCommand> mLastCommand; // last processed command (used by dump)
String8 mName; // string used by wake lock for delayed commands
wp<AudioPolicyService> mService;
@@ -553,11 +523,6 @@
// function enabling the audio policy manager to receive proprietary information directly from the audio hardware interface.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
-
// set downlink audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
@@ -597,7 +562,7 @@
public:
NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid);
+ uid_t uid, pid_t pid);
virtual ~NotificationClient();
void onAudioPortListUpdate();
@@ -610,6 +575,10 @@
audio_patch_handle_t patchHandle);
void setAudioPortCallbacksEnabled(bool enabled);
+ uid_t uid() {
+ return mUid;
+ }
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -619,34 +588,61 @@
const wp<AudioPolicyService> mService;
const uid_t mUid;
+ const pid_t mPid;
const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
bool mAudioPortCallbacksEnabled;
};
+ class AudioClient : public virtual RefBase {
+ public:
+ AudioClient(const audio_attributes_t attributes,
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, const audio_port_handle_t deviceId) :
+ attributes(attributes), io(io), uid(uid), pid(pid),
+ session(session), deviceId(deviceId), active(false) {}
+ ~AudioClient() override = default;
+
+ const audio_attributes_t attributes; // source, flags ...
+ const audio_io_handle_t io; // audio HAL stream IO handle
+ const uid_t uid; // client UID
+ const pid_t pid; // client PID
+ const audio_session_t session; // audio session ID
+ const audio_port_handle_t deviceId; // selected input device port ID
+ bool active; // Playback/Capture is active or inactive
+ };
+
// --- AudioRecordClient ---
// Information about each registered AudioRecord client
// (between calls to getInputForAttr() and releaseInput())
- class AudioRecordClient : public RefBase {
+ class AudioRecordClient : public AudioClient {
public:
AudioRecordClient(const audio_attributes_t attributes,
- const audio_io_handle_t input, uid_t uid, pid_t pid,
- const String16& opPackageName, const audio_session_t session) :
- attributes(attributes),
- input(input), uid(uid), pid(pid),
- opPackageName(opPackageName), session(session),
- active(false), isConcurrent(false), isVirtualDevice(false) {}
- virtual ~AudioRecordClient() {}
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, const audio_port_handle_t deviceId,
+ const String16& opPackageName) :
+ AudioClient(attributes, io, uid, pid, session, deviceId),
+ opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+ ~AudioRecordClient() override = default;
- const audio_attributes_t attributes; // source, flags ...
- const audio_io_handle_t input; // audio HAL input IO handle
- const uid_t uid; // client UID
- const pid_t pid; // client PID
const String16 opPackageName; // client package name
- const audio_session_t session; // audio session ID
- bool active; // Capture is active or inactive
bool isConcurrent; // is allowed to concurrent capture
bool isVirtualDevice; // uses virtual device: updated by APM::getInputForAttr()
- audio_port_handle_t deviceId; // selected input device port ID
+ };
+
+ // --- AudioPlaybackClient ---
+ // Information about each registered AudioTrack client
+ // (between calls to getOutputForAttr() and releaseOutput())
+ class AudioPlaybackClient : public AudioClient {
+ public:
+ AudioPlaybackClient(const audio_attributes_t attributes,
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, audio_port_handle_t deviceId,
+ audio_stream_type_t stream) :
+ AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+ ~AudioPlaybackClient() override = default;
+
+ const audio_stream_type_t stream;
};
// A class automatically clearing and restoring binder caller identity inside
@@ -676,14 +672,13 @@
// mLock protects AudioPolicyManager methods that can call into audio flinger
// and possibly back in to audio policy service and acquire mEffectsLock.
sp<AudioCommandThread> mAudioCommandThread; // audio commands thread
- sp<AudioCommandThread> mTonePlaybackThread; // tone playback thread
sp<AudioCommandThread> mOutputCommandThread; // process stop and release output
struct audio_policy_device *mpAudioPolicyDev;
struct audio_policy *mpAudioPolicy;
AudioPolicyInterface *mAudioPolicyManager;
AudioPolicyClient *mAudioPolicyClient;
- DefaultKeyedVector< uid_t, sp<NotificationClient> > mNotificationClients;
+ DefaultKeyedVector< int64_t, sp<NotificationClient> > mNotificationClients;
Mutex mNotificationClientsLock; // protects mNotificationClients
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
@@ -691,6 +686,7 @@
sp<UidPolicy> mUidPolicy;
DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> > mAudioRecordClients;
+ DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> > mAudioPlaybackClients;
};
} // namespace android
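AudioRecordClient and the new AudioPlaybackClient now share the AudioClient base and are tracked per audio_port_handle_t. A hypothetical usage sketch of the new playback bookkeeping, assuming the getOutputForAttr()/releaseOutput() plumbing that is not part of this hunk (names such as attr, output, deviceId, portId and stream are placeholders):

    // Hypothetical registration of a playback client, keyed by the port ID handed
    // back to the app; the real insertion lives in getOutputForAttr().
    sp<AudioPlaybackClient> client = new AudioPlaybackClient(
            attr, output, uid, pid, session, deviceId, stream);
    mAudioPlaybackClients.add(portId, client);

    // stopOutput()/releaseOutput() then only need the port ID to find the client.
    ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
    if (index >= 0) {
        mAudioPlaybackClients.valueAt(index)->active = false;
    }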
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index a43daea..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -6,7 +6,6 @@
frameworks/av/services/audiopolicy \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_SHARED_LIBRARIES := \
libaudiopolicymanagerdefault \
@@ -30,3 +29,26 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ liblog \
+ libmedia_helper \
+ libutils
+
+LOCAL_SRC_FILES := \
+ systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
int /*delayMs*/) override { }
String8 getParameters(audio_io_handle_t /*ioHandle*/,
const String8& /*keys*/) override { return String8(); }
- status_t startTone(audio_policy_tone_t /*tone*/,
- audio_stream_type_t /*stream*/) override { return NO_INIT; }
- status_t stopTone() override { return NO_INIT; }
status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
status_t moveEffects(audio_session_t /*session*/,
audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a9593b8..56af152 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -16,9 +16,15 @@
#include <memory>
#include <set>
+#include <sys/wait.h>
+#include <unistd.h>
#include <gtest/gtest.h>
+#define LOG_TAG "APM_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+
#include "AudioPolicyTestClient.h"
#include "AudioPolicyTestManager.h"
@@ -132,6 +138,36 @@
// SetUp must finish with no assertions.
}
+TEST_F(AudioPolicyManagerTest, Dump) {
+ int pipefd[2];
+ ASSERT_NE(-1, pipe(pipefd));
+ pid_t cpid = fork();
+ ASSERT_NE(-1, cpid);
+ if (cpid == 0) {
+ // Child process reads from the pipe and logs.
+ close(pipefd[1]);
+ std::string line;
+ char buf;
+ while (read(pipefd[0], &buf, sizeof(buf)) > 0) {
+ if (buf != '\n') {
+ line += buf;
+ } else {
+ ALOGI("%s", line.c_str());
+ line = "";
+ }
+ }
+ if (!line.empty()) ALOGI("%s", line.c_str());
+ close(pipefd[0]);
+ _exit(EXIT_SUCCESS);
+ } else {
+ // Parent does the dump and checks the status code.
+ close(pipefd[0]);
+ ASSERT_EQ(NO_ERROR, mManager->dump(pipefd[1]));
+ close(pipefd[1]);
+ wait(NULL); // Wait for the child to exit.
+ }
+}
+
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
audio_patch patch{};
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
@@ -166,29 +202,14 @@
}
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
- audio_patch patch{};
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
uid_t uid = 42;
const size_t patchCountBefore = mClient->getActivePatchesCount();
- patch.num_sources = 1;
- {
- auto& src = patch.sources[0];
- src.role = AUDIO_PORT_ROLE_SOURCE;
- src.type = AUDIO_PORT_TYPE_MIX;
- src.id = mManager->getConfig().getAvailableInputDevices()[0]->getId();
- // Note: these are the parameters of the output device.
- src.sample_rate = 44100;
- src.format = AUDIO_FORMAT_PCM_16_BIT;
- src.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
- }
- patch.num_sinks = 1;
- {
- auto& sink = patch.sinks[0];
- sink.role = AUDIO_PORT_ROLE_SINK;
- sink.type = AUDIO_PORT_TYPE_DEVICE;
- sink.id = mManager->getConfig().getDefaultOutputDevice()->getId();
- }
- ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(&patch, &handle, uid));
+ ASSERT_FALSE(mManager->getConfig().getAvailableInputDevices().isEmpty());
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(mManager->getConfig().getAvailableInputDevices()[0]).
+ addSink(mManager->getConfig().getDefaultOutputDevice());
+ ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
ASSERT_EQ(patchCountBefore + 1, mClient->getActivePatchesCount());
}
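The new Dump test above pushes AudioPolicyManager::dump() through a pipe whose read end is drained by a forked child, so the parent's write() calls can never block on a full pipe. The same pattern in isolation, as a self-contained helper (plain POSIX, independent of the gtest fixture):

    #include <cstdlib>
    #include <string>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    // Run a dump-style callback against the write end of a pipe while a forked
    // child drains the read end; returns the callback's status code.
    template <typename DumpFn>
    int dumpThroughPipe(DumpFn dumpFn) {
        int pipefd[2];
        if (pipe(pipefd) == -1) return -1;
        pid_t cpid = fork();
        if (cpid == -1) return -1;
        if (cpid == 0) {                  // child: consume the dump line by line
            close(pipefd[1]);
            std::string line;
            char buf;
            while (read(pipefd[0], &buf, sizeof(buf)) > 0) {
                if (buf == '\n') line.clear(); else line += buf;
            }
            close(pipefd[0]);
            _exit(EXIT_SUCCESS);
        }
        close(pipefd[0]);                 // parent: produce the dump, then reap the child
        int status = dumpFn(pipefd[1]);
        close(pipefd[1]);
        wait(nullptr);
        return status;
    }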
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+ audio_patch patch{};
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 1;
+ patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 0;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ // It's OK not to have sinks.
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).
+ addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+ audio_port_config device_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+ audio_port_config device_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+ audio_port_config mix_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+ audio_port_config mix_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+ const audio_patch patch1{}, patch2{};
+ // Invalid patches are not equal.
+ ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ audio_port_config sink_hw_av_sync = sink;
+ sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
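The PatchInvalid and PatchValid cases above pin down the count constraints that audio_patch_is_valid() enforces. A simplified restatement of just those bounds (not the system/audio.h implementation, which may check more):

    // Bounds exercised by the tests: at least one source, and neither port list
    // may exceed AUDIO_PATCH_PORTS_MAX; a patch with zero sinks is still valid.
    static bool patch_counts_look_valid(const struct audio_patch* patch) {
        return patch->num_sources > 0 &&
               patch->num_sources <= AUDIO_PATCH_PORTS_MAX &&
               patch->num_sinks <= AUDIO_PATCH_PORTS_MAX;
    }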
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c41de82..b85dd51 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -582,7 +582,7 @@
Status CameraService::makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
- bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+ int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
if (halVersion < 0 || halVersion == deviceVersion) {
@@ -594,7 +594,7 @@
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new CameraClient(cameraService, tmp, packageName,
api1CameraId, facing, clientPid, clientUid,
- getpid(), legacyMode);
+ getpid());
} else { // Camera2 API route
ALOGW("Camera using old HAL version: %d", deviceVersion);
return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
@@ -612,7 +612,7 @@
*client = new Camera2Client(cameraService, tmp, packageName,
cameraId, api1CameraId,
facing, clientPid, clientUid,
- servicePid, legacyMode);
+ servicePid);
} else { // Camera2 API route
sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
@@ -636,7 +636,7 @@
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new CameraClient(cameraService, tmp, packageName,
api1CameraId, facing, clientPid, clientUid,
- servicePid, legacyMode);
+ servicePid);
} else {
// Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
@@ -735,8 +735,7 @@
sp<ICameraClient>{nullptr}, id, cameraId,
static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
internalPackageName, uid, USE_CALLING_PID,
- API_1, /*legacyMode*/ false, /*shimUpdateOnly*/ true,
- /*out*/ tmp)
+ API_1, /*shimUpdateOnly*/ true, /*out*/ tmp)
).isOk()) {
ALOGE("%s: Error initializing shim metadata: %s", __FUNCTION__, ret.toString8().string());
}
@@ -1200,8 +1199,7 @@
sp<Client> client = nullptr;
ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, clientPid, API_1,
- /*legacyMode*/ false, /*shimUpdateOnly*/ false,
- /*out*/client);
+ /*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
logRejected(id, getCallingPid(), String8(clientPackageName),
@@ -1227,8 +1225,7 @@
Status ret = Status::ok();
sp<Client> client = nullptr;
ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId, halVersion,
- clientPackageName, clientUid, USE_CALLING_PID, API_1,
- /*legacyMode*/ true, /*shimUpdateOnly*/ false,
+ clientPackageName, clientUid, USE_CALLING_PID, API_1, /*shimUpdateOnly*/ false,
/*out*/client);
if(!ret.isOk()) {
@@ -1256,9 +1253,7 @@
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
/*api1CameraId*/-1,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
- clientUid, USE_CALLING_PID, API_2,
- /*legacyMode*/ false, /*shimUpdateOnly*/ false,
- /*out*/client);
+ clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
logRejected(id, getCallingPid(), String8(clientPackageName),
@@ -1273,7 +1268,7 @@
template<class CALLBACK, class CLIENT>
Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
int api1CameraId, int halVersion, const String16& clientPackageName, int clientUid,
- int clientPid, apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+ int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device) {
binder::Status ret = binder::Status::ok();
@@ -1358,7 +1353,7 @@
sp<BasicClient> tmp = nullptr;
if(!(ret = makeClient(this, cameraCb, clientPackageName,
cameraId, api1CameraId, facing,
- clientPid, clientUid, getpid(), legacyMode,
+ clientPid, clientUid, getpid(),
halVersion, deviceVersion, effectiveApiLevel,
/*out*/&tmp)).isOk()) {
return ret;
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 8d4bcdb..e4a18d3 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -585,8 +585,7 @@
template<class CALLBACK, class CLIENT>
binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
int api1CameraId, int halVersion, const String16& clientPackageName,
- int clientUid, int clientPid,
- apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+ int clientUid, int clientPid, apiLevel effectiveApiLevel, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device);
// Lock guarding camera service state
@@ -844,7 +843,7 @@
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
- bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+ int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
status_t checkCameraAccess(const String16& opPackageName);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c8b3c2f..bf18c48 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -54,8 +54,7 @@
int cameraFacing,
int clientPid,
uid_t clientUid,
- int servicePid,
- bool legacyMode):
+ int servicePid):
Camera2ClientBase(cameraService, cameraClient, clientPackageName,
cameraDeviceId, api1CameraId, cameraFacing,
clientPid, clientUid, servicePid),
@@ -65,8 +64,6 @@
SharedParameters::Lock l(mParameters);
l.mParameters.state = Parameters::DISCONNECTED;
-
- mLegacyMode = legacyMode;
}
status_t Camera2Client::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
@@ -456,8 +453,6 @@
mDevice->disconnect();
- mDevice.clear();
-
CameraService::Client::disconnect();
return res;
@@ -1443,7 +1438,7 @@
return OK;
}
-status_t Camera2Client::takePicture(int msgType) {
+status_t Camera2Client::takePicture(int /*msgType*/) {
ATRACE_CALL();
Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
@@ -1542,7 +1537,7 @@
// Need HAL to have correct settings before (possibly) triggering precapture
syncWithDevice();
- res = mCaptureSequencer->startCapture(msgType);
+ res = mCaptureSequencer->startCapture();
if (res != OK) {
ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1662,27 +1657,6 @@
return OK;
}
- // the camera2 api legacy mode can unconditionally disable the shutter sound
- if (mLegacyMode) {
- ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
- l.mParameters.playShutterSound = false;
- return OK;
- }
-
- // Disabling shutter sound may not be allowed. In that case only
- // allow the mediaserver process to disable the sound.
- char value[PROPERTY_VALUE_MAX];
- property_get("ro.camera.sound.forced", value, "0");
- if (strncmp(value, "0", 2) != 0) {
- // Disabling shutter sound is not allowed. Deny if the current
- // process is not mediaserver.
- if (getCallingPid() != getpid()) {
- ALOGE("Failed to disable shutter sound. Permission denied (pid %d)",
- getCallingPid());
- return PERMISSION_DENIED;
- }
- }
-
l.mParameters.playShutterSound = false;
return OK;
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 44929c3..a9ea271 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -96,8 +96,7 @@
int cameraFacing,
int clientPid,
uid_t clientUid,
- int servicePid,
- bool legacyMode);
+ int servicePid);
virtual ~Camera2Client();
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index f1203f9..ce44efe 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -40,7 +40,7 @@
const String16& clientPackageName,
int cameraId, int cameraFacing,
int clientPid, int clientUid,
- int servicePid, bool legacyMode):
+ int servicePid):
Client(cameraService, cameraClient, clientPackageName,
String8::format("%d", cameraId), cameraId, cameraFacing, clientPid,
clientUid, servicePid)
@@ -57,7 +57,6 @@
// Callback is disabled by default
mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
- mLegacyMode = legacyMode;
mPlayShutterSound = true;
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
@@ -715,26 +714,6 @@
return OK;
}
- // the camera2 api legacy mode can unconditionally disable the shutter sound
- if (mLegacyMode) {
- ALOGV("%s: Disable shutter sound in legacy mode", __FUNCTION__);
- mPlayShutterSound = false;
- return OK;
- }
-
- // Disabling shutter sound may not be allowed. In that case only
- // allow the mediaserver process to disable the sound.
- char value[PROPERTY_VALUE_MAX];
- property_get("ro.camera.sound.forced", value, "0");
- if (strcmp(value, "0") != 0) {
- // Disabling shutter sound is not allowed. Deny if the current
- // process is not mediaserver.
- if (getCallingPid() != getpid()) {
- ALOGE("Failed to disable shutter sound. Permission denied (pid %d)", getCallingPid());
- return PERMISSION_DENIED;
- }
- }
-
mPlayShutterSound = false;
return OK;
}
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 1910536..9530b6c 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -68,8 +68,7 @@
int cameraFacing,
int clientPid,
int clientUid,
- int servicePid,
- bool legacyMode = false);
+ int servicePid);
~CameraClient();
virtual status_t initialize(sp<CameraProviderManager> manager,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 1ee216f..f42cdd3 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -50,8 +50,7 @@
mStateTransitionCount(0),
mTriggerId(0),
mTimeoutCount(0),
- mCaptureId(Camera2Client::kCaptureRequestIdStart),
- mMsgType(0) {
+ mCaptureId(Camera2Client::kCaptureRequestIdStart) {
ALOGV("%s", __FUNCTION__);
}
@@ -64,7 +63,7 @@
mZslProcessor = processor;
}
-status_t CaptureSequencer::startCapture(int msgType) {
+status_t CaptureSequencer::startCapture() {
ALOGV("%s", __FUNCTION__);
ATRACE_CALL();
Mutex::Autolock l(mInputMutex);
@@ -73,7 +72,6 @@
return INVALID_OPERATION;
}
if (!mStartCapture) {
- mMsgType = msgType;
mStartCapture = true;
mStartCaptureSignal.signal();
}
@@ -386,7 +384,7 @@
SharedParameters::Lock l(client->getParameters());
/* warning: this also locks a SharedCameraCallbacks */
- shutterNotifyLocked(l.mParameters, client, mMsgType);
+ shutterNotifyLocked(l.mParameters, client);
mShutterNotified = true;
mTimeoutCount = kMaxTimeoutsForCaptureEnd;
return STANDARD_CAPTURE_WAIT;
@@ -610,7 +608,7 @@
if (!mShutterNotified) {
SharedParameters::Lock l(client->getParameters());
/* warning: this also locks a SharedCameraCallbacks */
- shutterNotifyLocked(l.mParameters, client, mMsgType);
+ shutterNotifyLocked(l.mParameters, client);
mShutterNotified = true;
}
} else if (mTimeoutCount <= 0) {
@@ -715,12 +713,11 @@
}
/*static*/ void CaptureSequencer::shutterNotifyLocked(const Parameters &params,
- const sp<Camera2Client>& client, int msgType) {
+ const sp<Camera2Client>& client) {
ATRACE_CALL();
if (params.state == Parameters::STILL_CAPTURE
- && params.playShutterSound
- && (msgType & CAMERA_MSG_SHUTTER)) {
+ && params.playShutterSound) {
client->getCameraService()->playSound(CameraService::SOUND_SHUTTER);
}
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index f2e3750..c23b12d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -51,7 +51,7 @@
void setZslProcessor(const wp<ZslProcessor>& processor);
// Begin still image capture
- status_t startCapture(int msgType);
+ status_t startCapture();
// Wait until current image capture completes; returns immediately if no
// capture is active. Returns TIMED_OUT if capture does not complete during
@@ -145,7 +145,6 @@
bool mAeInPrecapture;
int32_t mCaptureId;
- int mMsgType;
// Main internal methods
@@ -172,7 +171,7 @@
// Emit Shutter/Raw callback to java, and maybe play a shutter sound
static void shutterNotifyLocked(const Parameters &params,
- const sp<Camera2Client>& client, int msgType);
+ const sp<Camera2Client>& client);
};
}; // namespace camera2
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 0c738e7..683e84d 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -33,7 +33,10 @@
FrameProcessorBase(device),
mClient(client),
mLastFrameNumberOfFaces(0),
- mLast3AFrameNumber(-1) {
+ mLast3AFrameNumber(-1),
+ mLastAEFrameNumber(-1),
+ mLastAFrameNumber(-1),
+ mLastAWBFrameNumber(-1) {
sp<CameraDeviceBase> d = device.promote();
mSynthesize3ANotify = !(d->willNotify3A());
@@ -262,24 +265,73 @@
bool gotAllStates = true;
// TODO: Also use AE mode, AE trigger ID
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+ bool gotAFState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
&pendingState.afMode, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+ bool gotAWBState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
&pendingState.awbMode, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+ bool gotAEState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
&pendingState.aeState, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+ gotAFState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
&pendingState.afState, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+ gotAWBState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
&pendingState.awbState, frameNumber, cameraId);
pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+ if (gotAEState && (frameNumber > mLastAEFrameNumber)) {
+ if (pendingState.aeState != m3aState.aeState ||
+ pendingState.aeTriggerId > m3aState.aeTriggerId) {
+ ALOGV("%s: Camera %d: AE state %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.aeState, pendingState.aeState);
+ client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
+
+ m3aState.aeState = pendingState.aeState;
+ m3aState.aeTriggerId = pendingState.aeTriggerId;
+ mLastAEFrameNumber = frameNumber;
+ }
+ }
+
+ if (gotAFState && (frameNumber > mLastAFrameNumber)) {
+ if (pendingState.afState != m3aState.afState ||
+ pendingState.afMode != m3aState.afMode ||
+ pendingState.afTriggerId != m3aState.afTriggerId) {
+ ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.afState, pendingState.afState,
+ m3aState.afMode, pendingState.afMode,
+ m3aState.afTriggerId, pendingState.afTriggerId);
+ client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
+
+ m3aState.afState = pendingState.afState;
+ m3aState.afMode = pendingState.afMode;
+ m3aState.afTriggerId = pendingState.afTriggerId;
+ mLastAFrameNumber = frameNumber;
+ }
+ }
+
+ if (gotAWBState && (frameNumber > mLastAWBFrameNumber)) {
+ if (pendingState.awbState != m3aState.awbState ||
+ pendingState.awbMode != m3aState.awbMode) {
+ ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.awbState, pendingState.awbState,
+ m3aState.awbMode, pendingState.awbMode);
+ client->notifyAutoWhitebalance(pendingState.awbState,
+ pendingState.aeTriggerId);
+
+ m3aState.awbMode = pendingState.awbMode;
+ m3aState.awbState = pendingState.awbState;
+ mLastAWBFrameNumber = frameNumber;
+ }
+ }
+
+ gotAllStates &= gotAEState & gotAFState & gotAWBState;
if (!gotAllStates) {
// If not all states are received, put the pending state to mPending3AStates.
if (index == NAME_NOT_FOUND) {
@@ -290,40 +342,10 @@
return NOT_ENOUGH_DATA;
}
- // Once all 3A states are received, notify the client about 3A changes.
- if (pendingState.aeState != m3aState.aeState ||
- pendingState.aeTriggerId > m3aState.aeTriggerId) {
- ALOGV("%s: Camera %d: AE state %d->%d",
- __FUNCTION__, cameraId,
- m3aState.aeState, pendingState.aeState);
- client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
- }
-
- if (pendingState.afState != m3aState.afState ||
- pendingState.afMode != m3aState.afMode ||
- pendingState.afTriggerId != m3aState.afTriggerId) {
- ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
- __FUNCTION__, cameraId,
- m3aState.afState, pendingState.afState,
- m3aState.afMode, pendingState.afMode,
- m3aState.afTriggerId, pendingState.afTriggerId);
- client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
- }
- if (pendingState.awbState != m3aState.awbState ||
- pendingState.awbMode != m3aState.awbMode) {
- ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
- __FUNCTION__, cameraId,
- m3aState.awbState, pendingState.awbState,
- m3aState.awbMode, pendingState.awbMode);
- client->notifyAutoWhitebalance(pendingState.awbState,
- pendingState.aeTriggerId);
- }
-
if (index != NAME_NOT_FOUND) {
mPending3AStates.removeItemsAt(index);
}
- m3aState = pendingState;
mLast3AFrameNumber = frameNumber;
return OK;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 62a4e91..8183c12 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -104,8 +104,7 @@
// Track most recent frame number for which 3A notifications were sent.
// Used to filter against sending 3A notifications for the same frame
// several times.
- int32_t mLast3AFrameNumber;
-
+ int32_t mLast3AFrameNumber, mLastAEFrameNumber, mLastAFrameNumber, mLastAWBFrameNumber;
// Emit FaceDetection event to java if faces changed
void callbackFaceDetection(const sp<Camera2Client>& client,
const camera_frame_metadata &metadata);
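The FrameProcessor change above stops waiting for a frame to carry all three 3A states before notifying the client; each state is forwarded as soon as it arrives, gated by its own last-notified frame number so stale or repeated results are ignored. The gating reduced to a generic helper (illustrative only, not code from the patch):

    #include <cstdint>

    // Notify only when a state actually changed and the frame is newer than the
    // last frame for which this particular state was already reported.
    template <typename T, typename NotifyFn>
    void notifyIfNewer(bool gotState, int32_t frameNumber, int32_t& lastNotifiedFrame,
                       const T& pending, T& current, NotifyFn notify) {
        if (gotState && frameNumber > lastNotifiedFrame && pending != current) {
            notify(pending);
            current = pending;
            lastNotifiedFrame = frameNumber;
        }
    }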
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 98d0534..84428c2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -661,7 +661,8 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
+ res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+ physicalCameraId);
if (!res.isOk())
return res;
@@ -889,6 +890,8 @@
const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
outputConfiguration.getGraphicBufferProducers();
+ String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+
auto producerCount = bufferProducers.size();
if (producerCount == 0) {
ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -942,7 +945,7 @@
OutputStreamInfo outInfo;
sp<Surface> surface;
res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
- newOutputsMap.valueAt(i));
+ newOutputsMap.valueAt(i), physicalCameraId);
if (!res.isOk())
return res;
@@ -1021,7 +1024,8 @@
binder::Status CameraDeviceClient::createSurfaceFromGbp(
OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8& physicalId) {
// bufferProducer must be non-null
if (gbp == nullptr) {
@@ -1098,7 +1102,7 @@
// Round dimensions to the nearest dimensions available for this format
if (flexibleConsumer && isPublicFormat(format) &&
!CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+ format, dataSpace, mDevice->info(physicalId), /*out*/&width, /*out*/&height)) {
String8 msg = String8::format("Camera %s: No supported stream configurations with "
"format %#x defined, failed to create output stream",
mCameraIdStr.string(), format);
@@ -1468,6 +1472,7 @@
const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
outputConfiguration.getGraphicBufferProducers();
+ String8 physicalId(outputConfiguration.getPhysicalCameraId());
if (bufferProducers.size() == 0) {
ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -1521,7 +1526,7 @@
sp<Surface> surface;
res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
- surface, bufferProducer);
+ surface, bufferProducer, physicalId);
if (!res.isOk())
return res;
@@ -1677,9 +1682,12 @@
// WORKAROUND: HAL refuses to disconnect while there's streams in flight
{
- mDevice->clearStreamingRequest();
-
+ int64_t lastFrameNumber;
status_t code;
+ if ((code = mDevice->flush(&lastFrameNumber)) != OK) {
+ ALOGE("%s: flush failed with code 0x%x", __FUNCTION__, code);
+ }
+
if ((code = mDevice->waitUntilDrained()) != OK) {
ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
code);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5aaf5aa..c30561d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -258,7 +258,8 @@
// Create a Surface from an IGraphicBufferProducer. Returns error if
// IGraphicBufferProducer's property doesn't match with streamInfo
binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8& physicalCameraId);
// Utility method to insert the surface into SurfaceMap
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ce006a7..aeea473 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -57,13 +57,13 @@
cameraId, api1CameraId, cameraFacing, clientPid, clientUid, servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
+ mDevice(new Camera3Device(cameraId)),
mDeviceActive(false), mApi1CameraId(api1CameraId)
{
ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
String8(clientPackageName).string(), clientPid, clientUid);
mInitialClientPid = clientPid;
- mDevice = new Camera3Device(cameraId);
LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
}
@@ -206,8 +206,6 @@
if (mDevice == 0) return;
mDevice->disconnect();
- mDevice.clear();
-
ALOGV("Camera %s: Detach complete", TClientBase::mCameraIdStr.string());
}
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index e74fbdf..6693847 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -130,7 +130,10 @@
/** CameraDeviceBase instance wrapping HAL3+ entry */
const int mDeviceVersion;
- sp<CameraDeviceBase> mDevice;
+
+ // Set to const to avoid mDevice being updated (update of sp<> is racy) during
+ // dumpDevice (which is important to keep lock-free for debugging purposes)
+ const sp<CameraDeviceBase> mDevice;
/** Utility members */
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 0ba7403..98c1b5e 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -69,6 +69,10 @@
* The device's static characteristics metadata buffer
*/
virtual const CameraMetadata& info() const = 0;
+ /**
+ * The physical camera device's static characteristics metadata buffer
+ */
+ virtual const CameraMetadata& info(const String8& physicalId) const = 0;
struct PhysicalCameraSettings {
std::string cameraId;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0ce4318..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -336,6 +336,7 @@
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
bool /*preexisting*/) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -458,6 +459,7 @@
}
status_t CameraProviderManager::removeProvider(const std::string& provider) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
std::unique_lock<std::mutex> lock(mInterfaceMutex);
std::vector<String8> removedDeviceIds;
status_t res = NAME_NOT_FOUND;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b8b8b8c..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -246,6 +246,9 @@
wp<StatusListener> mListener;
ServiceInteractionProxy* mServiceProxy;
+ // mProviderLifecycleLock is locked during onRegistration and removeProvider
+ mutable std::mutex mProviderLifecycleLock;
+
static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
struct ProviderInfo :
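A rough sketch of the locking pattern the two hunks above establish, with hypothetical names: each provider lifecycle entry point takes the coarse lifecycle mutex for its whole duration and the finer interface mutex only while the shared tables are touched, so registration and removal can never interleave.

    #include <mutex>

    class ProviderRegistry {                            // hypothetical stand-in for CameraProviderManager
      public:
        void onRegistration() {
            std::lock_guard<std::mutex> lifecycle(mProviderLifecycleLock);  // serialize with removeProvider()
            {
                std::lock_guard<std::mutex> lock(mInterfaceMutex);          // short critical section on shared tables
                ++mProviderCount;
            }
            // ... remaining registration work, still covered by the lifecycle lock ...
        }

        void removeProvider() {
            std::lock_guard<std::mutex> lifecycle(mProviderLifecycleLock);
            std::unique_lock<std::mutex> lock(mInterfaceMutex);
            if (mProviderCount > 0) --mProviderCount;
        }

      private:
        mutable std::mutex mProviderLifecycleLock;      // coarse: held across the whole add/remove sequence
        mutable std::mutex mInterfaceMutex;             // fine: held only while the provider tables are touched
        int mProviderCount = 0;
    };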
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 28ffc8b..7656407 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -90,7 +90,7 @@
{
ATRACE_CALL();
ALOGV("%s: Tearing down for camera id %s", __FUNCTION__, mId.string());
- disconnect();
+ disconnectImpl();
}
const String8& Camera3Device::getId() const {
@@ -121,11 +121,25 @@
res = manager->getCameraCharacteristics(mId.string(), &mDeviceInfo);
if (res != OK) {
- SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+ SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
session->close();
return res;
}
+ std::vector<std::string> physicalCameraIds;
+ bool isLogical = CameraProviderManager::isLogicalCamera(mDeviceInfo, &physicalCameraIds);
+ if (isLogical) {
+ for (auto& physicalId : physicalCameraIds) {
+ res = manager->getCameraCharacteristics(physicalId, &mPhysicalDeviceInfoMap[physicalId]);
+ if (res != OK) {
+ SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
+ physicalId.c_str(), strerror(-res), res);
+ session->close();
+ return res;
+ }
+ }
+ }
+
std::shared_ptr<RequestMetadataQueue> queue;
auto requestQueueRet = session->getCaptureRequestMetadataQueue(
[&queue](const auto& descriptor) {
@@ -261,8 +275,13 @@
}
status_t Camera3Device::disconnect() {
+ return disconnectImpl();
+}
+
+status_t Camera3Device::disconnectImpl() {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock stLock(mTrackerLock);
ALOGI("%s: E", __FUNCTION__);
@@ -719,7 +738,7 @@
return OK;
}
-const CameraMetadata& Camera3Device::info() const {
+const CameraMetadata& Camera3Device::info(const String8& physicalId) const {
ALOGVV("%s: E", __FUNCTION__);
if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
mStatus == STATUS_ERROR)) {
@@ -727,7 +746,22 @@
mStatus == STATUS_ERROR ?
"when in error state" : "before init");
}
- return mDeviceInfo;
+ if (physicalId.isEmpty()) {
+ return mDeviceInfo;
+ } else {
+ std::string id(physicalId.c_str());
+ if (mPhysicalDeviceInfoMap.find(id) != mPhysicalDeviceInfoMap.end()) {
+ return mPhysicalDeviceInfoMap.at(id);
+ } else {
+ ALOGE("%s: Invalid physical camera id %s", __FUNCTION__, physicalId.c_str());
+ return mDeviceInfo;
+ }
+ }
+}
+
+const CameraMetadata& Camera3Device::info() const {
+ String8 emptyId;
+ return info(emptyId);
}
status_t Camera3Device::checkStatusOkToCaptureLocked() {
@@ -2708,18 +2742,19 @@
status_t Camera3Device::registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
bool hasAppCallback, nsecs_t maxExpectedDuration,
- std::set<String8>& physicalCameraIds) {
+ std::set<String8>& physicalCameraIds, bool isStillCapture) {
ATRACE_CALL();
Mutex::Autolock l(mInFlightLock);
ssize_t res;
res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
- hasAppCallback, maxExpectedDuration, physicalCameraIds));
+ hasAppCallback, maxExpectedDuration, physicalCameraIds, isStillCapture));
if (res < 0) return res;
if (mInFlightMap.size() == 1) {
- // hold mLock to prevent race with disconnect
- Mutex::Autolock l(mLock);
+ // Hold a separate dedicated tracker lock to prevent a race with disconnect and also
+ // to avoid a deadlock during reprocess requests.
+ Mutex::Autolock l(mTrackerLock);
if (mStatusTracker != nullptr) {
mStatusTracker->markComponentActive(mInFlightStatusId);
}
@@ -2752,8 +2787,9 @@
// Indicate idle inFlightMap to the status tracker
if (mInFlightMap.size() == 0) {
- // hold mLock to prevent race with disconnect
- Mutex::Autolock l(mLock);
+ // Hold a separate dedicated tracker lock to prevent a race with disconnect and also
+ // to avoid a deadlock during reprocess requests.
+ Mutex::Autolock l(mTrackerLock);
if (mStatusTracker != nullptr) {
mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
}
@@ -2778,6 +2814,10 @@
if (request.numBuffersLeft == 0 &&
(request.skipResultMetadata ||
(request.haveResultMetadata && shutterTimestamp != 0))) {
+ if (request.stillCapture) {
+ ATRACE_ASYNC_END("still capture", frameNumber);
+ }
+
ATRACE_ASYNC_END("frame capture", frameNumber);
// Sanity check - if sensor timestamp matches shutter timestamp in the
@@ -3908,18 +3948,17 @@
}
hardware::details::return_status err;
+ auto resultCallback =
+ [&status, &numRequestProcessed] (auto s, uint32_t n) {
+ status = s;
+ *numRequestProcessed = n;
+ };
if (hidlSession_3_4 != nullptr) {
err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
- [&status, &numRequestProcessed] (auto s, uint32_t n) {
- status = s;
- *numRequestProcessed = n;
- });
+ resultCallback);
} else {
err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
- [&status, &numRequestProcessed] (auto s, uint32_t n) {
- status = s;
- *numRequestProcessed = n;
- });
+ resultCallback);
}
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
@@ -4729,6 +4768,7 @@
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
+ bool batchedRequest = mNextRequests[0].captureRequest->mBatchSize > 1;
for (size_t i = 0; i < mNextRequests.size(); i++) {
auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
@@ -4752,7 +4792,10 @@
mPrevTriggers = triggerCount;
// If the request is the same as last, or we had triggers last time
- bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
+ bool newRequest = (mPrevRequest != captureRequest || triggersMixedIn) &&
+ // Request settings are all the same within one batch, so only treat the first
+ // request in a batch as new
+ !(batchedRequest && i > 0);
if (newRequest) {
/**
* HAL workaround:
@@ -4901,15 +4944,24 @@
// preview), and the current request is not the last one in the batch,
// do not send callback to the app.
bool hasCallback = true;
- if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+ if (batchedRequest && i != mNextRequests.size()-1) {
hasCallback = false;
}
+ bool isStillCapture = false;
+ if (!mNextRequests[0].captureRequest->mSettingsList.begin()->metadata.isEmpty()) {
+ camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+ find_camera_metadata_ro_entry(halRequest->settings, ANDROID_CONTROL_CAPTURE_INTENT, &e);
+ if ((e.count > 0) && (e.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE)) {
+ isStillCapture = true;
+ ATRACE_ASYNC_BEGIN("still capture", mNextRequests[i].halRequest.frame_number);
+ }
+ }
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
hasCallback,
calculateMaxExpectedDuration(halRequest->settings),
- requestedPhysicalCameras);
+ requestedPhysicalCameras, isStillCapture);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d8fe19f..85f9614 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -101,6 +101,7 @@
status_t disconnect() override;
status_t dump(int fd, const Vector<String16> &args) override;
const CameraMetadata& info() const override;
+ const CameraMetadata& info(const String8& physicalId) const override;
// Capture and setStreamingRequest will configure streams if currently in
// idle state
@@ -209,6 +210,8 @@
private:
+ status_t disconnectImpl();
+
// internal typedefs
using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
using ResultMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
@@ -379,6 +382,7 @@
sp<HalInterface> mInterface;
CameraMetadata mDeviceInfo;
+ std::unordered_map<std::string, CameraMetadata> mPhysicalDeviceInfoMap;
CameraMetadata mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
@@ -992,6 +996,9 @@
// Map of physicalCameraId <-> Metadata
std::vector<PhysicalCaptureResultInfo> physicalMetadatas;
+ // Indicates a still capture request.
+ bool stillCapture;
+
// Default constructor needed by KeyedVector
InFlightRequest() :
shutterTimestamp(0),
@@ -1002,12 +1009,13 @@
hasInputBuffer(false),
hasCallback(true),
maxExpectedDuration(kDefaultExpectedDuration),
- skipResultMetadata(false) {
+ skipResultMetadata(false),
+ stillCapture(false) {
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
bool hasAppCallback, nsecs_t maxDuration,
- const std::set<String8>& physicalCameraIdSet) :
+ const std::set<String8>& physicalCameraIdSet, bool isStillCapture) :
shutterTimestamp(0),
sensorTimestamp(0),
requestStatus(OK),
@@ -1018,7 +1026,8 @@
hasCallback(hasAppCallback),
maxExpectedDuration(maxDuration),
skipResultMetadata(false),
- physicalCameraIds(physicalCameraIdSet) {
+ physicalCameraIds(physicalCameraIdSet),
+ stillCapture(isStillCapture) {
}
};
@@ -1032,10 +1041,10 @@
nsecs_t mExpectedInflightDuration = 0;
int mInFlightStatusId;
-
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds);
+ bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds,
+ bool isStillCapture);
/**
* Returns the maximum expected time it'll take for all currently in-flight
@@ -1208,6 +1217,9 @@
static callbacks_notify_t sNotify;
+ // Synchronizes access to status tracker between inflight updates and disconnect.
+ // b/79972865
+ Mutex mTrackerLock;
}; // class Camera3Device
}; // namespace android
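A minimal illustration of the dedicated "leaf" lock pattern that mTrackerLock above follows. The class and method names are hypothetical; the real deadlock referenced in b/79972865 involves the camera request and reprocess paths, not these toy functions, so this only shows the general shape: nothing else is acquired while the tracker lock is held.

    #include <mutex>

    class TrackerOwner {                                 // hypothetical stand-in
      public:
        void markActive() {
            // Only the tracker hand-off is guarded; no other lock is taken while
            // mTrackerLock is held, so it cannot participate in a lock cycle.
            std::lock_guard<std::mutex> tl(mTrackerLock);
            ++mActiveComponents;
        }
        void disconnect() {
            std::lock_guard<std::mutex> il(mInterfaceLock);  // coarse device lock, taken first
            std::lock_guard<std::mutex> tl(mTrackerLock);    // leaf lock, always taken last
            mActiveComponents = 0;
        }
      private:
        std::mutex mInterfaceLock;
        std::mutex mTrackerLock;    // leaf lock: never held while acquiring another mutex
        int mActiveComponents = 0;
    };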
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index 4dafefd..ae7af8e 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -312,8 +312,8 @@
int32_t coords[4] = {
rects[i],
rects[i + 1],
- rects[i] + rects[i + 2],
- rects[i + 1] + rects[i + 3]
+ rects[i] + rects[i + 2] - 1,
+ rects[i + 1] + rects[i + 3] - 1
};
mapRawToCorrected(coords, 2, clamp, simple);
@@ -321,8 +321,8 @@
// Map back to (l, t, width, height)
rects[i] = coords[0];
rects[i + 1] = coords[1];
- rects[i + 2] = coords[2] - coords[0];
- rects[i + 3] = coords[3] - coords[1];
+ rects[i + 2] = coords[2] - coords[0] + 1;
+ rects[i + 3] = coords[3] - coords[1] + 1;
}
return OK;
@@ -400,8 +400,8 @@
int32_t coords[4] = {
rects[i],
rects[i + 1],
- rects[i] + rects[i + 2],
- rects[i + 1] + rects[i + 3]
+ rects[i] + rects[i + 2] - 1,
+ rects[i + 1] + rects[i + 3] - 1
};
mapCorrectedToRaw(coords, 2, clamp, simple);
@@ -409,8 +409,8 @@
// Map back to (l, t, width, height)
rects[i] = coords[0];
rects[i + 1] = coords[1];
- rects[i + 2] = coords[2] - coords[0];
- rects[i + 3] = coords[3] - coords[1];
+ rects[i + 2] = coords[2] - coords[0] + 1;
+ rects[i + 3] = coords[3] - coords[1] + 1;
}
return OK;
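A worked example of the off-by-one the hunks above fix, using the 4032x3024 active array from the new ClampConsistency test and an identity mapping, so the only effect is the clamp to the valid pixel range. A rect stored as (l, t, width, height) covers pixels l .. l+width-1; converting it to an exclusive corner (l+width) and clamping shrinks it by a pixel on the round trip, while the inclusive corner (l+width-1) survives unchanged.

    #include <algorithm>
    #include <cassert>

    int main() {
        const int l = 0, w = 4032, maxX = 4032 - 1;        // pixels span x = 0 .. 4031

        // Old conversion: exclusive right edge, clamped, then width recomputed.
        int rightExclusive = std::min(l + w, maxX);        // 4032 clamps to 4031
        int oldWidth = rightExclusive - l;                 // 4031: the rect shrank by one pixel

        // New conversion: inclusive right edge survives the clamp unchanged.
        int rightInclusive = std::min(l + w - 1, maxX);    // 4031 stays 4031
        int newWidth = rightInclusive - l + 1;             // 4032: the round trip is lossless

        assert(oldWidth == 4031 && newWidth == 4032);
        return 0;
    }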
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
index 2a689c6..54935c9 100644
--- a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -167,6 +167,30 @@
}
}
+TEST(DistortionMapperTest, ClampConsistency) {
+ status_t res;
+
+ std::array<int32_t, 4> activeArray = {0, 0, 4032, 3024};
+ DistortionMapper m;
+ setupTestMapper(&m, identityDistortion, testICal, /*activeArray*/ activeArray.data(),
+ /*preCorrectionActiveArray*/ activeArray.data());
+
+ auto rectsOrig = activeArray;
+ res = m.mapCorrectedRectToRaw(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < activeArray.size(); i++) {
+ EXPECT_EQ(activeArray[i], rectsOrig[i]);
+ }
+
+ res = m.mapRawRectToCorrected(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < activeArray.size(); i++) {
+ EXPECT_EQ(activeArray[i], rectsOrig[i]);
+ }
+}
+
TEST(DistortionMapperTest, SimpleTransform) {
status_t res;
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index c0a353f..f4c49ec 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -49,7 +49,8 @@
std::lock_guard<std::mutex> lock(mMonitorMutex);
// Expand shorthands
- if (ssize_t idx = tagNames.find("3a") != -1) {
+ ssize_t idx = tagNames.find("3a");
+ if (idx != -1) {
ssize_t end = tagNames.find(",", idx);
char* start = tagNames.lockBuffer(tagNames.size());
start[idx] = '\0';
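A small self-contained illustration of the precedence bug fixed above: in the original if (ssize_t idx = tagNames.find("3a") != -1), the comparison is evaluated first, so idx receives the boolean result (0 or 1) rather than the match position. The snippet below uses std::string in place of the String8 used by the real code.

    #include <cassert>
    #include <string>

    int main() {
        std::string tagNames = "foo,3a,bar";

        // Buggy form: '!=' binds before the initialization, so the variable holds
        // (find(...) != npos), i.e. 1, not the position of "3a".
        long buggyIdx = tagNames.find("3a") != std::string::npos;   // buggyIdx == 1

        // Fixed form: compute the position first, then test it.
        long idx = static_cast<long>(tagNames.find("3a"));          // idx == 4
        if (idx != -1) { /* expand the shorthand starting at idx */ }

        assert(buggyIdx == 1 && idx == 4);
        return 0;
    }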
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 6ec8895..edf4dab 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -55,4 +55,8 @@
getdents64: 1
getrandom: 1
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
@include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index bbbe552..4031b11 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -24,6 +24,7 @@
mmap2: 1
fstat64: 1
stat64: 1
+statfs64: 1
madvise: 1
fstatat64: 1
futex: 1
@@ -55,4 +56,8 @@
getpid: 1
gettid: 1
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
@include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 37d6cc9..73c9535 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -18,6 +18,7 @@
LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaextractor.policy
LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaextractor.policy
LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaextractor.policy
# extractor libraries
LOCAL_REQUIRED_MODULES += \
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
old mode 100755
new mode 100644
index 63c7780..6d9ed6f
--- a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
@@ -21,6 +21,7 @@
getuid: 1
setpriority: 1
sigaltstack: 1
+fstatfs: 1
newfstatat: 1
restart_syscall: 1
exit: 1
@@ -30,28 +31,21 @@
sched_setscheduler: 1
getrlimit: 1
nanosleep: 1
+getrandom: 1
+
+# for dynamically loading extractors
+getdents64: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
# for FileSource
readlinkat: 1
-# for attaching to debuggerd on process crash
-tgkill: 1
-socket: arg0 == 1
-connect: 1
-fcntl: 1
-rt_sigprocmask: 1
-rt_sigaction: 1
-rt_tgsigqueueinfo: 1
-geteuid: 1
-getgid: 1
-getegid: 1
-getgroups: 1
-getdents64: 1
-pipe2: 1
-ppoll: 1
-
# Required by AddressSanitizer
gettid: 1
sched_yield: 1
getpid: 1
gettid: 1
+
+@include /system/etc/seccomp_policy/crash_dump.x86_64.policy
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index 29e6dfc..ca96f62 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -9,7 +9,9 @@
shared_libs: [
"libaudioutils",
"libbinder",
+ "libcutils",
"liblog",
+ "libmediautils",
"libnbaio",
"libnblog",
"libutils",
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 1be5544..e58dff7 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -21,7 +21,7 @@
#include <utils/Log.h>
#include <binder/PermissionCache.h>
#include <media/nblog/NBLog.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
#include "MediaLogService.h"
namespace android {
@@ -53,7 +53,7 @@
void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
{
- if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
+ if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0 ||
size < kMinSize || size > kMaxSize || name == NULL ||
shared->size() < NBLog::Timeline::sharedSize(size)) {
return;
@@ -67,7 +67,7 @@
void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
{
- if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0) {
+ if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0) {
return;
}
Mutex::Autolock _l(mLock);
@@ -95,10 +95,8 @@
status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
{
- // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
- static const String16 sDump("android.permission.DUMP");
- if (!(IPCThreadState::self()->getCallingUid() == AID_AUDIOSERVER ||
- PermissionCache::checkCallingPermission(sDump))) {
+ if (!(isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid())
+ || dumpAllowed())) {
dprintf(fd, "Permission Denial: can't dump media.log from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 6a72e5b..94440b1 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -24,6 +24,7 @@
#include <aaudio/AAudio.h>
#include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
#include <utils/String16.h>
#include "binding/AAudioServiceMessage.h"
@@ -33,7 +34,6 @@
#include "AAudioServiceStreamMMAP.h"
#include "AAudioServiceStreamShared.h"
#include "binding/IAAudioService.h"
-#include "ServiceUtilities.h"
using namespace android;
using namespace aaudio;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index f9e21fb..f30f9bb 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -189,6 +189,7 @@
minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
}
status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+ bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
if (status != OK) {
ALOGE("%s() - createMmapBuffer() failed with status %d %s",
__func__, status, strerror(-status));
@@ -198,18 +199,13 @@
ALOGD("%s() createMmapBuffer() returned = %d, buffer_size = %d, burst_size %d"
", Sharable FD: %s",
__func__, status,
- abs(mMmapBufferinfo.buffer_size_frames),
+ mMmapBufferinfo.buffer_size_frames,
mMmapBufferinfo.burst_size_frames,
- mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
+ isBufferShareable ? "Yes" : "No");
}
setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
- // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
- // by returning a negative buffer size
- if (getBufferCapacity() < 0) {
- // Exclusive mode can be used by client or service.
- setBufferCapacity(-getBufferCapacity());
- } else {
+ if (!isBufferShareable) {
// Exclusive mode can only be used by the service because the FD cannot be shared.
uid_t audioServiceUid = getuid();
if ((mMmapClient.clientUid != audioServiceUid) &&
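A compact sketch of the two conventions visible in the hunk above for deciding whether the MMAP buffer FD may be shared outside audioserver: the legacy negative-buffer-size encoding that the removed lines handled, and the AUDIO_MMAP_APPLICATION_SHAREABLE flag the new code reads. The struct and constant below are simplified, FAKE_-prefixed stand-ins for the real audio HAL definitions.

    #include <cstdint>
    #include <cstdlib>

    enum : uint32_t { FAKE_AUDIO_MMAP_APPLICATION_SHAREABLE = 1u << 0 };
    struct FakeMmapBufferInfo {
        int32_t  buffer_size_frames;   // legacy: a negative value meant "FD is shareable"
        uint32_t flags;                // current: shareability is a flag bit
    };

    // Legacy decoding, as the removed code did it.
    bool isShareableLegacy(const FakeMmapBufferInfo& info, int32_t* capacityOut) {
        *capacityOut = std::abs(info.buffer_size_frames);
        return info.buffer_size_frames < 0;
    }

    // Current decoding, as the new code does it.
    bool isShareableFlag(const FakeMmapBufferInfo& info, int32_t* capacityOut) {
        *capacityOut = info.buffer_size_frames;    // size is always reported as positive now
        return (info.flags & FAKE_AUDIO_MMAP_APPLICATION_SHAREABLE) != 0;
    }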
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 584b2ef..3d5f140 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -53,7 +53,6 @@
libbinder \
libcutils \
libmediautils \
- libserviceutility \
libutils \
liblog
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ad3666e..3c7d29d 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -34,8 +34,7 @@
libhardware \
libsoundtrigger \
libaudioclient \
- libserviceutility
-
+ libmediautils \
ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
# libhardware configuration
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index a7d6e83..eb9cd1d 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -27,13 +27,13 @@
#include <cutils/properties.h>
#include <hardware/hardware.h>
#include <media/AudioSystem.h>
+#include <mediautils/ServiceUtilities.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <system/sound_trigger.h>
-#include <ServiceUtilities.h>
#include "SoundTriggerHwService.h"
#ifdef SOUND_TRIGGER_USE_STUB_MODULE
@@ -562,10 +562,7 @@
if (mHalInterface == 0) {
return NO_INIT;
}
- if (modelMemory == 0 || modelMemory->pointer() == NULL) {
- ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
- return BAD_VALUE;
- }
+
struct sound_trigger_sound_model *sound_model =
(struct sound_trigger_sound_model *)modelMemory->pointer();
@@ -659,11 +656,6 @@
if (mHalInterface == 0) {
return NO_INIT;
}
- if (dataMemory == 0 || dataMemory->pointer() == NULL) {
- ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
- return BAD_VALUE;
-
- }
struct sound_trigger_recognition_config *config =
(struct sound_trigger_recognition_config *)dataMemory->pointer();
@@ -966,6 +958,9 @@
IPCThreadState::self()->getCallingUid())) {
return PERMISSION_DENIED;
}
+ if (checkIMemory(modelMemory) != NO_ERROR) {
+ return BAD_VALUE;
+ }
sp<Module> module = mModule.promote();
if (module == 0) {
@@ -997,6 +992,9 @@
IPCThreadState::self()->getCallingUid())) {
return PERMISSION_DENIED;
}
+ if (checkIMemory(dataMemory) != NO_ERROR) {
+ return BAD_VALUE;
+ }
sp<Module> module = mModule.promote();
if (module == 0) {