Merge "Improve seeking for Xing header without TOC" am: cd17b7453d am: 28301383d3
am: 1204711cd9
Change-Id: If1e9f7aa7c92f5bacdcebea3c7a6be61ea53ba8a
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 4603515..a7ac2d7 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -73,7 +73,7 @@
static bool gMonotonicTime = false; // use system monotonic time for timestamps
static bool gPersistentSurface = false; // use persistent surface
static enum {
- FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
+ FORMAT_MP4, FORMAT_H264, FORMAT_WEBM, FORMAT_3GPP, FORMAT_FRAMES, FORMAT_RAW_FRAMES
} gOutputFormat = FORMAT_MP4; // data format for output
static AString gCodecName = ""; // codec name override
static bool gSizeSpecified = false; // was size explicitly requested?
@@ -669,7 +669,9 @@
sp<MediaMuxer> muxer = NULL;
FILE* rawFp = NULL;
switch (gOutputFormat) {
- case FORMAT_MP4: {
+ case FORMAT_MP4:
+ case FORMAT_WEBM:
+ case FORMAT_3GPP: {
// Configure muxer. We have to wait for the CSD blob from the encoder
// before we can start it.
err = unlink(fileName);
@@ -682,7 +684,13 @@
fprintf(stderr, "ERROR: couldn't open file\n");
abort();
}
- muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ if (gOutputFormat == FORMAT_MP4) {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+ } else if (gOutputFormat == FORMAT_WEBM) {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_WEBM);
+ } else {
+ muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_THREE_GPP);
+ }
close(fd);
if (gRotate) {
muxer->setOrientationHint(90); // TODO: does this do anything?
@@ -1002,6 +1010,10 @@
gOutputFormat = FORMAT_MP4;
} else if (strcmp(optarg, "h264") == 0) {
gOutputFormat = FORMAT_H264;
+ } else if (strcmp(optarg, "webm") == 0) {
+ gOutputFormat = FORMAT_WEBM;
+ } else if (strcmp(optarg, "3gpp") == 0) {
+ gOutputFormat = FORMAT_3GPP;
} else if (strcmp(optarg, "frames") == 0) {
gOutputFormat = FORMAT_FRAMES;
} else if (strcmp(optarg, "raw-frames") == 0) {
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 61fc897..d41d3fd 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -78,6 +78,7 @@
static bool gPlaybackAudio;
static bool gWriteMP4;
static bool gDisplayHistogram;
+static bool gVerbose = false;
static bool showProgress = true;
static String8 gWriteMP4Filename;
static String8 gComponentNameOverride;
@@ -159,6 +160,11 @@
break;
}
+ if (gVerbose) {
+ MetaDataBase &meta = mbuf->meta_data();
+ fprintf(stdout, "sample format: %s\n", meta.toString().c_str());
+ }
+
CHECK_EQ(
fwrite((const uint8_t *)mbuf->data() + mbuf->range_offset(),
1,
@@ -348,7 +354,10 @@
decodeTimesUs.push(delayDecodeUs);
}
- if (showProgress && (n++ % 16) == 0) {
+ if (gVerbose) {
+ MetaDataBase &meta = buffer->meta_data();
+ fprintf(stdout, "%ld sample format: %s\n", numFrames, meta.toString().c_str());
+ } else if (showProgress && (n++ % 16) == 0) {
printf(".");
fflush(stdout);
}
@@ -579,12 +588,12 @@
break;
}
+ CHECK(buffer != NULL);
+
if (buffer->range_length() > 0) {
break;
}
- CHECK(buffer != NULL);
-
buffer->release();
buffer = NULL;
}
@@ -630,6 +639,7 @@
fprintf(stderr, " -T allocate buffers from a surface texture\n");
fprintf(stderr, " -d(ump) output_filename (raw stream data to a file)\n");
fprintf(stderr, " -D(ump) output_filename (decoded PCM data to a file)\n");
+ fprintf(stderr, " -v be more verbose\n");
}
static void dumpCodecProfiles(bool queryDecoders) {
@@ -640,7 +650,7 @@
MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
- MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
+ MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, MEDIA_MIMETYPE_AUDIO_AC4
};
const char *codecType = queryDecoders? "decoder" : "encoder";
@@ -708,7 +718,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "vhaqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -832,6 +842,12 @@
break;
}
+ case 'v':
+ {
+ gVerbose = true;
+ break;
+ }
+
case '?':
case 'h':
default:
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ca119d5..5f19f74 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -538,6 +538,10 @@
mTimestampMutator.push(timestamp);
}
+ virtual ExtendedTimestamp getTimestamp() const {
+ return mTimestampMutator.last();
+ }
+
// Flushes the shared ring buffer if the client had requested it using mStreaming.mFlush.
// If flush occurs then:
// cblk->u.mStreaming.mFront, ServerProxy::mFlush and ServerProxy::mFlushed will be modified
@@ -551,6 +555,9 @@
// Total count of the number of flushed frames since creation (never reset).
virtual int64_t framesFlushed() const { return mFlushed; }
+ // Safe frames ready query with no side effects.
+ virtual size_t framesReadySafe() const = 0;
+
// Get dynamic buffer size from the shared control block.
uint32_t getBufferSizeInFrames() const {
return android_atomic_acquire_load((int32_t *)&mCblk->mBufferSizeInFrames);
@@ -588,8 +595,7 @@
// which may include non-contiguous frames
virtual size_t framesReady();
- // Safe frames ready query used by dump() - this has no side effects.
- virtual size_t framesReadySafe() const;
+ size_t framesReadySafe() const override; // frames available to read by server.
// Currently AudioFlinger will call framesReady() for a fast track from two threads:
// FastMixer thread, and normal mixer thread. This is dangerous, as the proxy is intended
@@ -693,6 +699,8 @@
return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
}
+ size_t framesReadySafe() const override; // frames available to read by client.
+
protected:
virtual ~AudioRecordServerProxy() { }
};
diff --git a/media/OWNERS b/media/OWNERS
index 1f687a2..1e2d123 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -2,8 +2,10 @@
dwkang@google.com
elaurent@google.com
essick@google.com
+gkasten@google.com
hkuang@google.com
hunga@google.com
+jiabin@google.com
jmtrivi@google.com
krocard@google.com
lajos@google.com
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 1f2e82f..f1e815b 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -7,6 +7,7 @@
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
onrestart restart vendor.audio-hal-2-0
+ onrestart restart vendor.audio-hal-4-0-msd
# Keep the original service name for backward compatibility when upgrading
# O-MR1 devices with framework-only.
onrestart restart audio-hal-2-0
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
new file mode 100644
index 0000000..167d474
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AC4Parser"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include "AC4Parser.h"
+
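+// Bitstream helpers for walking the DSI: BYTE_ALIGN skips ahead to the next byte
+// boundary of the buffer, CHECK_BITS_LEFT(n) aborts the parse (returns false)
+// when fewer than n bits remain.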
+#define BOOLSTR(a) ((a)?"true":"false")
+#define BYTE_ALIGN mBitReader.skipBits(mBitReader.numBitsLeft() % 8)
+#define CHECK_BITS_LEFT(n) if (mBitReader.numBitsLeft() < n) {return false;}
+
+namespace android {
+
+AC4Parser::AC4Parser() {
+}
+
+AC4DSIParser::AC4DSIParser(ABitReader &br)
+ : mBitReader(br){
+
+ mDSISize = mBitReader.numBitsLeft();
+}
+
+// ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+static const char *ChannelModes[] = {
+ "mono",
+ "stereo",
+ "3.0",
+ "5.0",
+ "5.1",
+ "7.0 (3/4/0)",
+ "7.1 (3/4/0.1)",
+ "7.0 (5/2/0)",
+ "7.1 (5/2/0.1)",
+ "7.0 (3/2/2)",
+ "7.1 (3/2/2.1)",
+ "7.0.4",
+ "7.1.4",
+ "9.0.4",
+ "9.1.4",
+ "22.2"
+};
+
+static const char* ContentClassifier[] = {
+ "Complete Main",
+ "Music and Effects",
+ "Visually Impaired",
+ "Hearing Impaired",
+ "Dialog",
+ "Commentary",
+ "Emergency",
+ "Voice Over"
+};
+
+bool AC4DSIParser::parseLanguageTag(uint32_t presentationID, uint32_t substreamID){
+ CHECK_BITS_LEFT(6);
+ uint32_t n_language_tag_bytes = mBitReader.getBits(6);
+ if (n_language_tag_bytes < 2 || n_language_tag_bytes >= 42) {
+ return false;
+ }
+ CHECK_BITS_LEFT(n_language_tag_bytes * 8);
+ char language_tag_bytes[42]; // TS 103 190 part 1 4.3.3.8.7
+ for (uint32_t i = 0; i < n_language_tag_bytes; i++) {
+ language_tag_bytes[i] = (char)mBitReader.getBits(8);
+ }
+ language_tag_bytes[n_language_tag_bytes] = 0;
+ ALOGV("%u.%u: language_tag = %s\n", presentationID, substreamID, language_tag_bytes);
+
+ std::string language(language_tag_bytes, n_language_tag_bytes);
+ mPresentations[presentationID].mLanguage = language;
+
+ return true;
+}
+
+// TS 103 190-1 v1.2.1 E.5 and TS 103 190-2 v1.1.1 E.9
+bool AC4DSIParser::parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID){
+ CHECK_BITS_LEFT(5);
+ uint32_t channel_mode = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: channel_mode = %u (%s)\n", presentationID, substreamID, channel_mode,
+ channel_mode < NELEM(ChannelModes) ? ChannelModes[channel_mode] : "reserved");
+ ALOGV("%u.%u: dsi_sf_multiplier = %u\n", presentationID,
+ substreamID, dsi_sf_multiplier);
+ ALOGV("%u.%u: b_substream_bitrate_indicator = %s\n", presentationID,
+ substreamID, BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u: substream_bitrate_indicator = %u\n", presentationID, substreamID,
+ substream_bitrate_indicator);
+ }
+ if (channel_mode >= 7 && channel_mode <= 10) {
+ CHECK_BITS_LEFT(1);
+ uint32_t add_ch_base = mBitReader.getBits(1);
+ ALOGV("%u.%u: add_ch_base = %u\n", presentationID, substreamID, add_ch_base);
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, substreamID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3);
+ ALOGV("%u.%u: content_classifier = %u (%s)\n", presentationID, substreamID,
+ content_classifier, ContentClassifier[content_classifier]);
+
+ // For streams based on TS 103 190 part 1 the presentation level channel_mode doesn't
+ // exist and so we use the channel_mode from either the CM or M&E substream
+ // (they are mutually exclusive)
+ if (mPresentations[presentationID].mChannelMode == -1 &&
+ (content_classifier == 0 || content_classifier == 1)) {
+ mPresentations[presentationID].mChannelMode = channel_mode;
+ }
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, substreamID,
+ BOOLSTR(b_language_indicator));
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, substreamID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+// ETSI TS 103 190-2 v1.1.1 section E.11
+bool AC4DSIParser::parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID)
+{
+ CHECK_BITS_LEFT(1);
+ bool b_substreams_present = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_channel_coded = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(8);
+ uint32_t n_substreams = mBitReader.getBits(8);
+ ALOGV("%u.%u: b_substreams_present = %s\n", presentationID, groupID,
+ BOOLSTR(b_substreams_present));
+ ALOGV("%u.%u: b_hsf_ext = %s\n", presentationID, groupID, BOOLSTR(b_hsf_ext));
+ ALOGV("%u.%u: b_channel_coded = %s\n", presentationID, groupID, BOOLSTR(b_channel_coded));
+ ALOGV("%u.%u: n_substreams = %u\n", presentationID, groupID, n_substreams);
+
+ for (uint32_t i = 0; i < n_substreams; i++) {
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: dsi_sf_multiplier = %u\n", presentationID, groupID, i, dsi_sf_multiplier);
+ ALOGV("%u.%u.%u: b_substream_bitrate_indicator = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u.%u: substream_bitrate_indicator = %u\n", presentationID, groupID, i,
+ substream_bitrate_indicator);
+ }
+ if (b_channel_coded) {
+ CHECK_BITS_LEFT(24);
+ uint32_t dsi_substream_channel_mask = mBitReader.getBits(24);
+ ALOGV("%u.%u.%u: dsi_substream_channel_mask = 0x%06x\n", presentationID, groupID, i,
+ dsi_substream_channel_mask);
+ } else {
+ CHECK_BITS_LEFT(1);
+ bool b_ajoc = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_ajoc = %s\n", presentationID, groupID, i, BOOLSTR(b_ajoc));
+ if (b_ajoc) {
+ CHECK_BITS_LEFT(1);
+ bool b_static_dmx = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_static_dmx = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_static_dmx));
+ if (!b_static_dmx) {
+ CHECK_BITS_LEFT(4);
+ uint32_t n_dmx_objects_minus1 = mBitReader.getBits(4);
+ ALOGV("%u.%u.%u: n_dmx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_dmx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(6);
+ uint32_t n_umx_objects_minus1 = mBitReader.getBits(6);
+ ALOGV("%u.%u.%u: n_umx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_umx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(4);
+ mBitReader.skipBits(4); // objects_assignment_mask
+ }
+ }
+
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, groupID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3);
+ ALOGV("%u.%u: content_classifier = %s (%u)\n", presentationID, groupID,
+ ContentClassifier[content_classifier], content_classifier);
+
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, groupID,
+ BOOLSTR(b_language_indicator));
+
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, groupID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool AC4DSIParser::parseBitrateDsi() {
+ CHECK_BITS_LEFT(2 + 32 + 32);
+ mBitReader.skipBits(2); // bit_rate_mode
+ mBitReader.skipBits(32); // bit_rate
+ mBitReader.skipBits(32); // bit_rate_precision
+
+ return true;
+}
+
+// TS 103 190-1 section E.4 (ac4_dsi) and TS 103 190-2 section E.6 (ac4_dsi_v1)
+bool AC4DSIParser::parse() {
+ CHECK_BITS_LEFT(3);
+ uint32_t ac4_dsi_version = mBitReader.getBits(3);
+ if (ac4_dsi_version > 1) {
+ ALOGE("error while parsing ac-4 dsi: only versions 0 and 1 are supported");
+ return false;
+ }
+
+ CHECK_BITS_LEFT(7 + 1 + 4 + 9);
+ uint32_t bitstream_version = mBitReader.getBits(7);
+ mBitReader.skipBits(1); // fs_index
+ mBitReader.skipBits(4); // frame_rate_index
+ uint32_t n_presentations = mBitReader.getBits(9);
+
+ int32_t short_program_id = -1;
+ if (bitstream_version > 1) {
+ if (ac4_dsi_version == 0){
+ ALOGE("invalid ac4 dsi");
+ return false;
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_program_id = (mBitReader.getBits(1) == 1);
+ if (b_program_id) {
+ CHECK_BITS_LEFT(16 + 1);
+ short_program_id = mBitReader.getBits(16);
+ bool b_uuid = (mBitReader.getBits(1) == 1);
+ if (b_uuid) {
+ const uint32_t kAC4UUIDSizeInBytes = 16;
+ char program_uuid[kAC4UUIDSizeInBytes];
+ CHECK_BITS_LEFT(kAC4UUIDSizeInBytes * 8);
+ for (uint32_t i = 0; i < kAC4UUIDSizeInBytes; i++) {
+ program_uuid[i] = (char)(mBitReader.getBits(8));
+ }
+ ALOGV("UUID = %s", program_uuid);
+ }
+ }
+ }
+
+ if (ac4_dsi_version == 1) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ BYTE_ALIGN;
+ }
+
+ for (uint32_t presentation = 0; presentation < n_presentations; presentation++) {
+ mPresentations[presentation].mProgramID = short_program_id;
+ // known as b_single_substream in ac4_dsi_version 0
+ bool b_single_substream_group = false;
+ uint32_t presentation_config = 0, presentation_version = 0;
+ uint32_t pres_bytes = 0;
+
+ if (ac4_dsi_version == 0) {
+ CHECK_BITS_LEFT(1 + 5 + 5);
+ b_single_substream_group = (mBitReader.getBits(1) == 1);
+ presentation_config = mBitReader.getBits(5);
+ presentation_version = mBitReader.getBits(5);
+ } else if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(8 + 8);
+ presentation_version = mBitReader.getBits(8);
+ pres_bytes = mBitReader.getBits(8);
+ if (pres_bytes == 0xff) {
+ CHECK_BITS_LEFT(16);
+ pres_bytes += mBitReader.getBits(16);
+ }
+ ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
+ if (presentation_version > 1) {
+ CHECK_BITS_LEFT(pres_bytes * 8);
+ mBitReader.skipBits(pres_bytes * 8);
+ continue;
+ }
+ // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
+ // start with a presentation_config of 5 bits
+ CHECK_BITS_LEFT(5);
+ presentation_config = mBitReader.getBits(5);
+ b_single_substream_group = (presentation_config == 0x1f);
+ }
+
+ static const char *PresentationConfig[] = {
+ "Music&Effects + Dialog",
+ "Main + DE",
+ "Main + Associate",
+ "Music&Effects + Dialog + Associate",
+ "Main + DE + Associate",
+ "Arbitrary substream groups",
+ "EMDF only"
+ };
+ ALOGV("%u: b_single_substream/group = %s\n", presentation,
+ BOOLSTR(b_single_substream_group));
+ ALOGV("%u: presentation_version = %u\n", presentation, presentation_version);
+ ALOGV("%u: presentation_config = %u (%s)\n", presentation, presentation_config,
+ (presentation_config >= NELEM(PresentationConfig) ?
+ "reserved" : PresentationConfig[presentation_config]));
+
+ /* record a marker, less the size of the presentation_config */
+ uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
+
+ bool b_add_emdf_substreams = false;
+ if (!b_single_substream_group && presentation_config == 6) {
+ b_add_emdf_substreams = true;
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation, BOOLSTR(b_add_emdf_substreams));
+ } else {
+ CHECK_BITS_LEFT(3 + 1);
+ uint32_t mdcompat = mBitReader.getBits(3);
+ ALOGV("%u: mdcompat = %d\n", presentation, mdcompat);
+
+ bool b_presentation_group_index = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_group_index = %s\n", presentation,
+ BOOLSTR(b_presentation_group_index));
+ if (b_presentation_group_index) {
+ CHECK_BITS_LEFT(5);
+ mPresentations[presentation].mGroupIndex = mBitReader.getBits(5);
+ ALOGV("%u: presentation_group_index = %d\n", presentation,
+ mPresentations[presentation].mGroupIndex);
+ }
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
+ dsi_frame_rate_multiply_info);
+ if (ac4_dsi_version == 1 && presentation_version == 1) {
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
+ dsi_frame_rate_fraction_info);
+ }
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t presentation_emdf_version = mBitReader.getBits(5);
+ uint32_t presentation_key_id = mBitReader.getBits(10);
+ ALOGV("%u: presentation_emdf_version = %d\n", presentation, presentation_emdf_version);
+ ALOGV("%u: presentation_key_id = %d\n", presentation, presentation_key_id);
+
+ if (ac4_dsi_version == 1) {
+ bool b_presentation_channel_coded = false;
+ if (presentation_version == 0) {
+ b_presentation_channel_coded = true;
+ } else {
+ CHECK_BITS_LEFT(1);
+ b_presentation_channel_coded = (mBitReader.getBits(1) == 1);
+ }
+ ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
+ BOOLSTR(b_presentation_channel_coded));
+ if (b_presentation_channel_coded) {
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(5);
+ uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
+ mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
+ ALOGV("%u: dsi_presentation_ch_mode = %d (%s)\n", presentation,
+ dsi_presentation_ch_mode,
+ dsi_presentation_ch_mode < NELEM(ChannelModes) ?
+ ChannelModes[dsi_presentation_ch_mode] : "reserved");
+
+ if (dsi_presentation_ch_mode >= 11 && dsi_presentation_ch_mode <= 14) {
+ CHECK_BITS_LEFT(1 + 2);
+ uint32_t pres_b_4_back_channels_present = mBitReader.getBits(1);
+ uint32_t pres_top_channel_pairs = mBitReader.getBits(2);
+ ALOGV("%u: pres_b_4_back_channels_present = %s\n", presentation,
+ BOOLSTR(pres_b_4_back_channels_present));
+ ALOGV("%u: pres_top_channel_pairs = %d\n", presentation,
+ pres_top_channel_pairs);
+ }
+ }
+ // presentation_channel_mask in ac4_presentation_v0_dsi()
+ CHECK_BITS_LEFT(24);
+ uint32_t presentation_channel_mask_v1 = mBitReader.getBits(24);
+ ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
+ presentation_channel_mask_v1);
+ }
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
+ BOOLSTR(b_presentation_core_differs));
+ if (b_presentation_core_differs) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_channel_coded = (mBitReader.getBits(1) == 1);
+ if (b_presentation_core_channel_coded) {
+ CHECK_BITS_LEFT(2);
+ mBitReader.skipBits(2); // dsi_presentation_channel_mode_core
+ }
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_filter = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_filter = %s\n", presentation,
+ BOOLSTR(b_presentation_filter));
+ if (b_presentation_filter) {
+ CHECK_BITS_LEFT(1 + 8);
+ bool b_enable_presentation = (mBitReader.getBits(1) == 1);
+ if (!b_enable_presentation) {
+ mPresentations[presentation].mEnabled = false;
+ }
+ ALOGV("%u: b_enable_presentation = %s\n", presentation,
+ BOOLSTR(b_enable_presentation));
+ uint32_t n_filter_bytes = mBitReader.getBits(8);
+ CHECK_BITS_LEFT(n_filter_bytes * 8);
+ for (uint32_t i = 0; i < n_filter_bytes; i++) {
+ mBitReader.skipBits(8); // filter_data
+ }
+ }
+ }
+ } /* ac4_dsi_version == 1 */
+
+ if (b_single_substream_group) {
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ }
+ } else {
+ if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_multi_pid = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_multi_pid = %s\n", presentation, BOOLSTR(b_multi_pid));
+ } else {
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_hsf_ext = %s\n", presentation, BOOLSTR(b_hsf_ext));
+ }
+ switch (presentation_config) {
+ case 0:
+ case 1:
+ case 2:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ }
+ break;
+ case 3:
+ case 4:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 2)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 2)) {
+ return false;
+ }
+ }
+ break;
+ case 5:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ CHECK_BITS_LEFT(3);
+ uint32_t n_substream_groups_minus2 = mBitReader.getBits(3);
+ ALOGV("%u: n_substream_groups_minus2 = %d\n", presentation,
+ n_substream_groups_minus2);
+ for (uint32_t sg = 0; sg < n_substream_groups_minus2 + 2; sg++) {
+ if (!parseSubstreamGroupDSI(presentation, sg)) {
+ return false;
+ }
+ }
+ }
+ break;
+ default:
+ CHECK_BITS_LEFT(7);
+ uint32_t n_skip_bytes = mBitReader.getBits(7);
+ CHECK_BITS_LEFT(n_skip_bytes * 8);
+ for (uint32_t j = 0; j < n_skip_bytes; j++) {
+ mBitReader.getBits(8);
+ }
+ break;
+ }
+ CHECK_BITS_LEFT(1 + 1);
+ bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+ mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+ b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+ BOOLSTR(b_add_emdf_substreams));
+ }
+ }
+ if (b_add_emdf_substreams) {
+ CHECK_BITS_LEFT(7);
+ uint32_t n_add_emdf_substreams = mBitReader.getBits(7);
+ for (uint32_t j = 0; j < n_add_emdf_substreams; j++) {
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t substream_emdf_version = mBitReader.getBits(5);
+ uint32_t substream_key_id = mBitReader.getBits(10);
+ ALOGV("%u: emdf_substream[%d]: version=%d, key_id=%d\n", presentation, j,
+ substream_emdf_version, substream_key_id);
+ }
+ }
+
+ bool b_presentation_bitrate_info = false;
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ b_presentation_bitrate_info = (mBitReader.getBits(1) == 1);
+ }
+
+ ALOGV("b_presentation_bitrate_info = %s\n", BOOLSTR(b_presentation_bitrate_info));
+ if (b_presentation_bitrate_info) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ }
+
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ bool b_alternative = (mBitReader.getBits(1) == 1);
+ ALOGV("b_alternative = %s\n", BOOLSTR(b_alternative));
+ if (b_alternative) {
+ BYTE_ALIGN;
+ CHECK_BITS_LEFT(16);
+ uint32_t name_len = mBitReader.getBits(16);
+ CHECK_BITS_LEFT(name_len * 8);
+ char* presentation_name = new char[name_len + 1];
+ for (uint32_t i = 0; i < name_len; i++) {
+ presentation_name[i] = (char)(mBitReader.getBits(8));
+ }
+ presentation_name[name_len] = '\0';
+ std::string description(presentation_name, name_len);
+ mPresentations[presentation].mDescription = description;
+ delete[] presentation_name; // release the temporary buffer so it is not leaked for every presentation
+ CHECK_BITS_LEFT(5);
+ uint32_t n_targets = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(n_targets * (3 + 8));
+ for (uint32_t i = 0; i < n_targets; i++){
+ mBitReader.skipBits(3); // target_md_compat
+ mBitReader.skipBits(8); // target_device_category
+ }
+ }
+ }
+
+ BYTE_ALIGN;
+
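+ // pres_bytes declared how large this presentation's DSI is; skip whatever
+ // part of it was not consumed by the parsing above.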
+ if (ac4_dsi_version == 1) {
+ uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
+ if (mBitReader.numBitsLeft() % 8 != 0) {
+ end += 1;
+ }
+
+ uint64_t presentation_bytes = end - start;
+ uint64_t skip_bytes = pres_bytes - presentation_bytes;
+ ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
+ CHECK_BITS_LEFT(skip_bytes * 8);
+ mBitReader.skipBits(skip_bytes * 8);
+ }
+
+ // The channel mode must be known by now; if it is not, the bitstream is
+ // malformed or uses features this parser does not support.
+ if (mPresentations[presentation].mChannelMode == -1) {
+ ALOGE("could not determine channel mode of presentation %u", presentation);
+ return false;
+ }
+ } /* each presentation */
+
+ return true;
+}
+
+};
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/AC4Parser.h
new file mode 100644
index 0000000..73b6e31
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AC4_PARSER_H_
+#define AC4_PARSER_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+#include <media/stagefright/foundation/ABitReader.h>
+
+namespace android {
+
+class AC4Parser {
+public:
+ AC4Parser();
+ virtual ~AC4Parser() { }
+
+ virtual bool parse() = 0;
+
+ struct AC4Presentation {
+ int32_t mChannelMode = -1;
+ int32_t mProgramID = -1;
+ int32_t mGroupIndex = -1;
+
+ // TS 103 190-1 v1.2.1 4.3.3.8.1
+ enum ContentClassifiers {
+ kCompleteMain,
+ kMusicAndEffects,
+ kVisuallyImpaired,
+ kHearingImpaired,
+ kDialog,
+ kCommentary,
+ kEmergency,
+ kVoiceOver
+ };
+
+ uint32_t mContentClassifier = kCompleteMain;
+
+ // ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+ enum InputChannelMode {
+ kChannelMode_Mono,
+ kChannelMode_Stereo,
+ kChannelMode_3_0,
+ kChannelMode_5_0,
+ kChannelMode_5_1,
+ kChannelMode_7_0_34,
+ kChannelMode_7_1_34,
+ kChannelMode_7_0_52,
+ kChannelMode_7_1_52,
+ kChannelMode_7_0_322,
+ kChannelMode_7_1_322,
+ kChannelMode_7_0_4,
+ kChannelMode_7_1_4,
+ kChannelMode_9_0_4,
+ kChannelMode_9_1_4,
+ kChannelMode_22_2,
+ kChannelMode_Reserved,
+ };
+
+ bool mHasDialogEnhancements = false;
+ bool mPreVirtualized = false;
+ bool mEnabled = true;
+
+ std::string mLanguage;
+ std::string mDescription;
+ };
+ typedef std::map<uint32_t, AC4Presentation> AC4Presentations;
+
+ const AC4Presentations& getPresentations() const { return mPresentations; }
+
+protected:
+ AC4Presentations mPresentations;
+};
+
+class AC4DSIParser: public AC4Parser {
+public:
+ explicit AC4DSIParser(ABitReader &br);
+ virtual ~AC4DSIParser() { }
+
+ bool parse();
+
+private:
+ bool parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID);
+ bool parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID);
+ bool parseLanguageTag(uint32_t presentationID, uint32_t substreamID);
+ bool parseBitrateDsi();
+
+ uint64_t mDSISize;
+ ABitReader& mBitReader;
+};
+
+};
+
+#endif // AC4_PARSER_H_
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index fa739e8..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -2,6 +2,7 @@
name: "libmp4extractor_defaults",
srcs: [
+ "AC4Parser.cpp",
"ItemTable.cpp",
"MPEG4Extractor.cpp",
"SampleIterator.cpp",
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index ca9deab..be442e6 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -1529,12 +1529,16 @@
if (thumbItemIndex >= 0) {
const ImageItem &thumbnail = mItemIdToItemMap[thumbItemIndex];
- meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
- meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
- meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
- thumbnail.hvcc->data(), thumbnail.hvcc->size());
- ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
- imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+ if (thumbnail.hvcc != NULL) {
+ meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
+ meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
+ meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
+ thumbnail.hvcc->data(), thumbnail.hvcc->size());
+ ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
+ imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+ } else {
+ ALOGW("%s: thumbnail data is missing for image[%u]!", __FUNCTION__, imageIndex);
+ }
} else {
ALOGW("%s: Referenced thumbnail does not exist!", __FUNCTION__);
}
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 7b3b81d..8412812 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -26,6 +26,7 @@
#include <utils/Log.h>
+#include "AC4Parser.h"
#include "MPEG4Extractor.h"
#include "SampleTable.h"
#include "ItemTable.h"
@@ -125,6 +126,8 @@
bool mIsAVC;
bool mIsHEVC;
+ bool mIsAC4;
+
size_t mNALLengthSize;
bool mStarted;
@@ -324,6 +327,8 @@
case FOURCC('h', 'v', 'c', '1'):
case FOURCC('h', 'e', 'v', '1'):
return MEDIA_MIMETYPE_VIDEO_HEVC;
+ case FOURCC('a', 'c', '-', '4'):
+ return MEDIA_MIMETYPE_AUDIO_AC4;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -2436,6 +2441,12 @@
return parseAC3SampleEntry(data_offset);
}
+ case FOURCC('a', 'c', '-', '4'):
+ {
+ *offset += chunk_size;
+ return parseAC4SampleEntry(data_offset);
+ }
+
case FOURCC('f', 't', 'y', 'p'):
{
if (chunk_data_size < 8 || depth != 0) {
@@ -2507,6 +2518,84 @@
return OK;
}
+status_t MPEG4Extractor::parseAC4SampleEntry(off64_t offset) {
+ // skip 16 bytes:
+ // + 6-byte reserved,
+ // + 2-byte data reference index,
+ // + 8-byte reserved
+ offset += 16;
+ uint16_t channelCount;
+ if (!mDataSource->getUInt16(offset, &channelCount)) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read channel count");
+ return ERROR_MALFORMED;
+ }
+ // skip 8 bytes:
+ // + 2-byte channelCount,
+ // + 2-byte sample size,
+ // + 4-byte reserved
+ offset += 8;
+ uint16_t sampleRate;
+ if (!mDataSource->getUInt16(offset, &sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read sample rate");
+ return ERROR_MALFORMED;
+ }
+
+ // skip 4 bytes:
+ // + 2-byte sampleRate,
+ // + 2-byte reserved
+ offset += 4;
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+ return parseAC4SpecificBox(offset);
+}
+
+status_t MPEG4Extractor::parseAC4SpecificBox(off64_t offset) {
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC4MinimumBoxSize = 4 + 4 + 3;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC4MinimumBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte size
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte type
+ offset += 4;
+ // at least for AC4 DSI v1 this is big enough
+ const uint32_t kAC4SpecificBoxPayloadSize = 256;
+ uint8_t chunk[kAC4SpecificBoxPayloadSize];
+ ssize_t dsiSize = size - 8; // size of box - size and type fields
+ if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize ||
+ mDataSource->readAt(offset, chunk, dsiSize) != dsiSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+ // + size-byte payload
+ offset += dsiSize;
+ ABitReader br(chunk, dsiSize);
+ AC4DSIParser parser(br);
+ if (!parser.parse()){
+ ALOGE("MPEG4Extractor: error while parsing ac-4 specific block");
+ return ERROR_MALFORMED;
+ }
+
+ return OK;
+}
+
status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
// skip 16 bytes:
// + 6-byte reserved,
@@ -3857,6 +3946,7 @@
mCurrentSampleInfoOffsets(NULL),
mIsAVC(false),
mIsHEVC(false),
+ mIsAC4(false),
mNALLengthSize(0),
mStarted(false),
mGroup(NULL),
@@ -3890,6 +3980,7 @@
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+ mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
if (mIsAVC) {
uint32_t type;
@@ -4830,7 +4921,7 @@
}
}
- if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+ if ((!mIsAVC && !mIsHEVC && !mIsAC4) || mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read =
mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -4862,13 +4953,20 @@
++mCurrentSampleIndex;
}
- if (!mIsAVC && !mIsHEVC) {
+ if (!mIsAVC && !mIsHEVC && !mIsAC4) {
*out = mBuffer;
mBuffer = NULL;
return OK;
}
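+ // AC-4 carries no NAL units, so a caller requesting NAL fragments
+ // (mWantsNALFragments) cannot be satisfied; fail the read.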
+ if (mIsAC4) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
// Each NAL unit is split up into its constituent fragments and
// each one of them returned in its own buffer.
@@ -4907,6 +5005,58 @@
*out = clone;
return OK;
+ } else if (mIsAC4) {
+ CHECK(mBuffer != NULL);
+ // Make sure there is enough space to write the sync header and the raw frame
+ if (mBuffer->range_length() < (7 + size)) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ uint8_t *dstData = (uint8_t *)mBuffer->data();
+ size_t dstOffset = 0;
+ // Add AC-4 sync header to MPEG4 encapsulated AC-4 raw frame
+ // AC40 sync word, meaning no CRC at the end of the frame
+ dstData[dstOffset++] = 0xAC;
+ dstData[dstOffset++] = 0x40;
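+ // frame_size = 0xFFFF is the escape value: the real frame size follows as a 24-bit field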
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = (uint8_t)((size >> 16) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 8) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 0) & 0xFF);
+
+ ssize_t numBytesRead = mDataSource->readAt(offset, dstData + dstOffset, size);
+ if (numBytesRead != (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ mBuffer->set_range(0, dstOffset + size);
+ mBuffer->meta_data().clear();
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
} else {
// Whole NAL units are returned but each fragment is prefixed by
// the start code (0x00 00 00 01).
@@ -5361,6 +5511,8 @@
return OK;
}
+
+ return OK;
}
MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 3ea0963..ed70aa7 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -141,6 +141,8 @@
status_t parseAC3SampleEntry(off64_t offset);
status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+ status_t parseAC4SampleEntry(off64_t offset);
+ status_t parseAC4SpecificBox(off64_t offset);
MPEG4Extractor(const MPEG4Extractor &);
MPEG4Extractor &operator=(const MPEG4Extractor &);
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index 88d7401..0e61589 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -272,7 +272,9 @@
if (strlen(arg) > 2) {
policy = atoi(&arg[2]);
}
- AAudio_setMMapPolicy(policy);
+ if (AAudio_setMMapPolicy(policy) != AAUDIO_OK) {
+ printf("ERROR: invalid MMAP policy mode %i\n", policy);
+ }
} break;
case 'n':
setNumberOfBursts(atoi(&arg[2]));
@@ -363,7 +365,7 @@
mode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
break;
default:
- printf("ERROR invalid performance mode %c\n", c);
+ printf("ERROR: invalid performance mode %c\n", c);
break;
}
return mode;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 3a7a578..4ef765d 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -150,6 +150,11 @@
allowMMap = false;
}
+ if (!allowMMap && !allowLegacy) {
+ ALOGE("%s() no backend available: neither MMAP nor legacy path are allowed", __func__);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
result = builder_createStream(getDirection(), sharingMode, allowMMap, &audioStream);
if (result == AAUDIO_OK) {
// Open the stream using the parameters from the builder.
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 2df37a8..6146c0e 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -49,6 +49,7 @@
"libaudiomanager",
"libmedia_helper",
"libmediametrics",
+ "libmediautils",
],
export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index c072901..cb4bcfc 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -878,31 +878,25 @@
flags, selectedDeviceId, portId);
}
-status_t AudioSystem::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioSystem::startOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->startOutput(output, stream, session);
+ return aps->startOutput(portId);
}
-status_t AudioSystem::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioSystem::stopOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->stopOutput(output, stream, session);
+ return aps->stopOutput(portId);
}
-void AudioSystem::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioSystem::releaseOutput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return;
- aps->releaseOutput(output, stream, session);
+ aps->releaseOutput(portId);
}
status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index dced3c4..a018b22 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -1231,6 +1231,21 @@
return 0;
}
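+// Side-effect-free snapshot of the frames the client could read right now;
+// safe to call from dump().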
+__attribute__((no_sanitize("integer")))
+size_t AudioRecordServerProxy::framesReadySafe() const
+{
+ if (mIsShutdown) {
+ return 0;
+ }
+ const int32_t front = android_atomic_acquire_load(&mCblk->u.mStreaming.mFront);
+ const int32_t rear = mCblk->u.mStreaming.mRear;
+ const ssize_t filled = rear - front;
+ if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+ return 0; // error condition, silently return 0.
+ }
+ return filled;
+}
+
// ---------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 00af7e8..37c62a8 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,10 +24,8 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <cutils/multiuser.h>
#include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
-
+#include <mediautils/ServiceUtilities.h>
#include "IAudioFlinger.h"
namespace android {
@@ -912,7 +910,7 @@
case SET_MIC_MUTE:
case SET_LOW_RAM_DEVICE:
case SYSTEM_READY: {
- if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a1236e7..9d77376 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -24,11 +24,10 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <cutils/multiuser.h>
#include <media/AudioEffect.h>
#include <media/IAudioPolicyService.h>
#include <media/TimeCheck.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
namespace android {
@@ -245,41 +244,29 @@
return status;
}
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual status_t startOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) session);
+ data.writeInt32((int32_t)portId);
remote()->transact(START_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual status_t stopOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) session);
+ data.writeInt32((int32_t)portId);
remote()->transact(STOP_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+ virtual void releaseOutput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(output);
- data.writeInt32((int32_t)stream);
- data.writeInt32((int32_t)session);
+ data.writeInt32((int32_t)portId);
remote()->transact(RELEASE_OUTPUT, data, &reply);
}
@@ -936,7 +923,7 @@
case STOP_AUDIO_SOURCE:
case GET_SURROUND_FORMATS:
case SET_SURROUND_FORMAT_ENABLED: {
- if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
@@ -1075,34 +1062,22 @@
case START_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream =
- static_cast <audio_stream_type_t>(data.readInt32());
- audio_session_t session = (audio_session_t)data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(startOutput(output,
- stream,
- session)));
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(startOutput(portId)));
return NO_ERROR;
} break;
case STOP_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream =
- static_cast <audio_stream_type_t>(data.readInt32());
- audio_session_t session = (audio_session_t)data.readInt32();
- reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
- stream,
- session)));
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(stopOutput(portId)));
return NO_ERROR;
} break;
case RELEASE_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- audio_stream_type_t stream = (audio_stream_type_t)data.readInt32();
- audio_session_t session = (audio_session_t)data.readInt32();
- releaseOutput(output, stream, session);
+ const audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ releaseOutput(portId);
return NO_ERROR;
} break;
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 967d895..24837e3 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -64,6 +64,9 @@
static const char * const keyPresentationId;
static const char * const keyProgramId;
+ // keyAudioLanguagePreferred: Preferred audio language
+ static const char * const keyAudioLanguagePreferred;
+
// keyStreamConnect / Disconnect: value is an int in audio_devices_t
static const char * const keyStreamConnect;
static const char * const keyStreamDisconnect;
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4c0f796..10d6e92 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -224,15 +224,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- static status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ static status_t startOutput(audio_port_handle_t portId);
+ static status_t stopOutput(audio_port_handle_t portId);
+ static void releaseOutput(audio_port_handle_t portId);
// Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
// or release it with releaseInput().
diff --git a/media/libaudioclient/include/media/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
index 498de8e..e5925dd 100644
--- a/media/libaudioclient/include/media/AudioTimestamp.h
+++ b/media/libaudioclient/include/media/AudioTimestamp.h
@@ -135,8 +135,23 @@
return INVALID_OPERATION;
}
+ double getOutputServerLatencyMs(uint32_t sampleRate) const {
+ return getLatencyMs(sampleRate, LOCATION_SERVER, LOCATION_KERNEL);
+ }
+
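+ // Latency between two timestamp locations: the frame lead of location1 over location2
+ // converted to time at sampleRate, minus the difference of their clock readings;
+ // returns 0. if either timestamp is unavailable.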
+ double getLatencyMs(uint32_t sampleRate, Location location1, Location location2) const {
+ if (sampleRate > 0 && mTimeNs[location1] > 0 && mTimeNs[location2] > 0) {
+ const int64_t frameDifference =
+ mPosition[location1] - mPosition[location2];
+ const int64_t timeDifferenceNs =
+ mTimeNs[location1] - mTimeNs[location2];
+ return ((double)frameDifference * 1e9 / sampleRate - timeDifferenceNs) * 1e-6;
+ }
+ return 0.;
+ }
+
// convert fields to a printable string
- std::string toString() {
+ std::string toString() const {
std::stringstream ss;
ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index c3876af..6c017a3 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -66,15 +66,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId) = 0;
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
+ virtual status_t startOutput(audio_port_handle_t portId) = 0;
+ virtual status_t stopOutput(audio_port_handle_t portId) = 0;
+ virtual void releaseOutput(audio_port_handle_t portId) = 0;
virtual status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
diff --git a/media/libaudiohal/2.0/Android.bp b/media/libaudiohal/2.0/Android.bp
deleted file mode 100644
index 574b435..0000000
--- a/media/libaudiohal/2.0/Android.bp
+++ /dev/null
@@ -1,54 +0,0 @@
-cc_library_shared {
- name: "libaudiohal@2.0",
-
- srcs: [
- "DeviceHalLocal.cpp",
- "DevicesFactoryHalHybrid.cpp",
- "DevicesFactoryHalLocal.cpp",
- "StreamHalLocal.cpp",
-
- "ConversionHelperHidl.cpp",
- "DeviceHalHidl.cpp",
- "DevicesFactoryHalHidl.cpp",
- "EffectBufferHalHidl.cpp",
- "EffectHalHidl.cpp",
- "EffectsFactoryHalHidl.cpp",
- "StreamHalHidl.cpp",
- ],
-
- export_include_dirs: ["."],
-
- cflags: [
- "-Wall",
- "-Werror",
- ],
- shared_libs: [
- "libaudiohal_deathhandler",
- "libaudioutils",
- "libcutils",
- "liblog",
- "libutils",
- "libhardware",
- "libbase",
- "libfmq",
- "libhwbinder",
- "libhidlbase",
- "libhidlmemory",
- "libhidltransport",
- "android.hardware.audio@2.0",
- "android.hardware.audio.common@2.0",
- "android.hardware.audio.common@2.0-util",
- "android.hardware.audio.effect@2.0",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
- "libmedia_helper",
- "libmediautils",
- ],
- header_libs: [
- "libaudiohal_headers"
- ],
-
- export_shared_lib_headers: [
- "libfmq",
- ],
-}
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.cpp b/media/libaudiohal/2.0/ConversionHelperHidl.cpp
deleted file mode 100644
index f60bf8b..0000000
--- a/media/libaudiohal/2.0/ConversionHelperHidl.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "HalHidl"
-#include <media/AudioParameter.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V2_0::Result;
-
-namespace android {
-
-// static
-status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
- AudioParameter halKeys(keys);
- if (halKeys.size() == 0) return BAD_VALUE;
- hidlKeys->resize(halKeys.size());
- //FIXME: keyStreamSupportedChannels and keyStreamSupportedSamplingRates come with a
- // "keyFormat=<value>" pair. We need to transform it into a single key string so that it is
- // carried over to the legacy HAL via HIDL.
- String8 value;
- bool keepFormatValue = halKeys.size() == 2 &&
- (halKeys.get(String8(AudioParameter::keyStreamSupportedChannels), value) == NO_ERROR ||
- halKeys.get(String8(AudioParameter::keyStreamSupportedSamplingRates), value) == NO_ERROR);
-
- for (size_t i = 0; i < halKeys.size(); ++i) {
- String8 key;
- status_t status = halKeys.getAt(i, key);
- if (status != OK) return status;
- if (keepFormatValue && key == AudioParameter::keyFormat) {
- AudioParameter formatParam;
- halKeys.getAt(i, key, value);
- formatParam.add(key, value);
- key = formatParam.toString();
- }
- (*hidlKeys)[i] = key.string();
- }
- return OK;
-}
-
-// static
-status_t ConversionHelperHidl::parametersFromHal(
- const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
- AudioParameter params(kvPairs);
- if (params.size() == 0) return BAD_VALUE;
- hidlParams->resize(params.size());
- for (size_t i = 0; i < params.size(); ++i) {
- String8 key, value;
- status_t status = params.getAt(i, key, value);
- if (status != OK) return status;
- (*hidlParams)[i].key = key.string();
- (*hidlParams)[i].value = value.string();
- }
- return OK;
-}
-
-// static
-void ConversionHelperHidl::parametersToHal(
- const hidl_vec<ParameterValue>& parameters, String8 *values) {
- AudioParameter params;
- for (size_t i = 0; i < parameters.size(); ++i) {
- params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
- }
- values->setTo(params.toString());
-}
-
-ConversionHelperHidl::ConversionHelperHidl(const char* className)
- : mClassName(className) {
-}
-
-// static
-status_t ConversionHelperHidl::analyzeResult(const Result& result) {
- switch (result) {
- case Result::OK: return OK;
- case Result::INVALID_ARGUMENTS: return BAD_VALUE;
- case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
- case Result::NOT_INITIALIZED: return NO_INIT;
- case Result::NOT_SUPPORTED: return INVALID_OPERATION;
- default: return NO_INIT;
- }
-}
-
-void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
- ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
deleted file mode 100644
index 5b99d70..0000000
--- a/media/libaudiohal/2.0/DeviceHalHidl.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-
-#define LOG_TAG "DeviceHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <cutils/native_handle.h>
-#include <hwbinder/IPCThreadState.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "HidlUtils.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioConfig;
-using ::android::hardware::audio::common::V2_0::AudioDevice;
-using ::android::hardware::audio::common::V2_0::AudioInputFlag;
-using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V2_0::AudioPort;
-using ::android::hardware::audio::common::V2_0::AudioPortConfig;
-using ::android::hardware::audio::common::V2_0::AudioMode;
-using ::android::hardware::audio::common::V2_0::AudioSource;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::V2_0::DeviceAddress;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-
-namespace {
-
-status_t deviceAddressFromHal(
- audio_devices_t device, const char* halAddress, DeviceAddress* address) {
- address->device = AudioDevice(device);
-
- if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
- return OK;
- }
- const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
- if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
- if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
- int status = sscanf(halAddress,
- "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
- &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
- &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
- return status == 6 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
- int status = sscanf(halAddress,
- "%hhu.%hhu.%hhu.%hhu",
- &address->address.ipv4[0], &address->address.ipv4[1],
- &address->address.ipv4[2], &address->address.ipv4[3]);
- return status == 4 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
- int status = sscanf(halAddress,
- "card=%d;device=%d",
- &address->address.alsa.card, &address->address.alsa.device);
- return status == 2 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
- if (halAddress != NULL) {
- address->busAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
- if (halAddress != NULL) {
- address->rSubmixAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- }
- return OK;
-}
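// Editorial note, not part of the original file: the address formats accepted
// above, as implied by the sscanf patterns, look like this:
//     A2DP            "00:11:22:AA:BB:CC"   -> address.mac[0..5]
//     IP (v4)         "192.168.1.10"        -> address.ipv4[0..3]
//     USB (ALSA)      "card=1;device=0"     -> address.alsa.card / .device
//     BUS / r_submix  copied verbatim into busAddress / rSubmixAddress
// Any other device type keeps only the device field and ignores the address string.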
-
-} // namespace
-
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
- : ConversionHelperHidl("Device"), mDevice(device),
- mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
-}
-
-DeviceHalHidl::~DeviceHalHidl() {
- if (mDevice != 0) {
- mDevice.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
-}
-
-status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
- // Obsolete.
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::initCheck() {
- if (mDevice == 0) return NO_INIT;
- return processReturn("initCheck", mDevice->initCheck());
-}
-
-status_t DeviceHalHidl::setVoiceVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
-}
-
-status_t DeviceHalHidl::setMasterVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
-}
-
-status_t DeviceHalHidl::getMasterVolume(float *volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
- [&](Result r, float v) {
- retval = r;
- if (retval == Result::OK) {
- *volume = v;
- }
- });
- return processReturn("getMasterVolume", ret, retval);
-}
-
-status_t DeviceHalHidl::setMode(audio_mode_t mode) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
-}
-
-status_t DeviceHalHidl::setMicMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMicMute", mDevice->setMicMute(state));
-}
-
-status_t DeviceHalHidl::getMicMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMicMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMicMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setMasterMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMasterMute", mDevice->setMasterMute(state));
-}
-
-status_t DeviceHalHidl::getMasterMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMasterMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMasterMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mDevice->setParameters(hidlParams));
-}
-
-status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (mDevice == 0) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mDevice->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t DeviceHalHidl::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- if (mDevice == 0) return NO_INIT;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval;
- Return<void> ret = mDevice->getInputBufferSize(
- hidlConfig,
- [&](Result r, uint64_t bufferSize) {
- retval = r;
- if (retval == Result::OK) {
- *size = static_cast<size_t>(bufferSize);
- }
- });
- return processReturn("getInputBufferSize", ret, retval);
-}
-
-status_t DeviceHalHidl::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openOutputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioOutputFlag(flags),
- [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *outStream = new StreamOutHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openOutputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openInputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioInputFlag(flags),
- AudioSource(source),
- [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *inStream = new StreamInHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openInputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
-}
-
-status_t DeviceHalHidl::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
- HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
- HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
- Result retval;
- Return<void> ret = mDevice->createAudioPatch(
- hidlSources, hidlSinks,
- [&](Result r, AudioPatchHandle hidlPatch) {
- retval = r;
- if (retval == Result::OK) {
- *patch = static_cast<audio_patch_handle_t>(hidlPatch);
- }
- });
- return processReturn("createAudioPatch", ret, retval);
-}
-
-status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
-}
-
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
- if (mDevice == 0) return NO_INIT;
- AudioPort hidlPort;
- HidlUtils::audioPortFromHal(*port, &hidlPort);
- Result retval;
- Return<void> ret = mDevice->getAudioPort(
- hidlPort,
- [&](Result r, const AudioPort& p) {
- retval = r;
- if (retval == Result::OK) {
- HidlUtils::audioPortToHal(p, port);
- }
- });
- return processReturn("getAudioPort", ret, retval);
-}
-
-status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
- if (mDevice == 0) return NO_INIT;
- AudioPortConfig hidlConfig;
- HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
- return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
-}
-
-status_t DeviceHalHidl::getMicrophones(
- std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
- if (mDevice == 0) return NO_INIT;
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::dump(int fd) {
- if (mDevice == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn("dump", ret);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
deleted file mode 100644
index ec3bf78..0000000
--- a/media/libaudiohal/2.0/DeviceHalLocal.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DeviceHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
- : mDev(dev) {
-}
-
-DeviceHalLocal::~DeviceHalLocal() {
- int status = audio_hw_device_close(mDev);
- ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
- mDev = 0;
-}
-
-status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
- if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
- *devices = mDev->get_supported_devices(mDev);
- return OK;
-}
-
-status_t DeviceHalLocal::initCheck() {
- return mDev->init_check(mDev);
-}
-
-status_t DeviceHalLocal::setVoiceVolume(float volume) {
- return mDev->set_voice_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMasterVolume(float volume) {
- if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
- return mDev->set_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::getMasterVolume(float *volume) {
- if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
- return mDev->get_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMode(audio_mode_t mode) {
- return mDev->set_mode(mDev, mode);
-}
-
-status_t DeviceHalLocal::setMicMute(bool state) {
- return mDev->set_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMicMute(bool *state) {
- return mDev->get_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setMasterMute(bool state) {
- if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
- return mDev->set_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMasterMute(bool *state) {
- if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
- return mDev->get_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
- return mDev->set_parameters(mDev, kvPairs.string());
-}
-
-status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mDev->get_parameters(mDev, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t DeviceHalLocal::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- *size = mDev->get_input_buffer_size(mDev, config);
- return OK;
-}
-
-status_t DeviceHalLocal::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- audio_stream_out_t *halStream;
- ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
- "srate: %d format %#x channels %x address %s",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address);
- int openResult = mDev->open_output_stream(
- mDev, handle, devices, flags, config, &halStream, address);
- if (openResult == OK) {
- *outStream = new StreamOutHalLocal(halStream, this);
- }
- ALOGV("open_output_stream status %d stream %p", openResult, halStream);
- return openResult;
-}
-
-status_t DeviceHalLocal::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- audio_stream_in_t *halStream;
- ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
- "srate: %d format %#x channels %x address %s source %d",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address, source);
- int openResult = mDev->open_input_stream(
- mDev, handle, devices, config, &halStream, flags, address, source);
- if (openResult == OK) {
- *inStream = new StreamInHalLocal(halStream, this);
- }
- ALOGV("open_input_stream status %d stream %p", openResult, inStream);
- return openResult;
-}
-
-status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
- *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
- return OK;
-}
-
-status_t DeviceHalLocal::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->create_audio_patch(
- mDev, num_sources, sources, num_sinks, sinks, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->release_audio_patch(mDev, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
- return mDev->get_audio_port(mDev, port);
-}
-
-status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
- return mDev->set_audio_port_config(mDev, config);
- else
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::getMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::dump(int fd) {
- return mDev->dump(mDev, fd);
-}
-
-void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
- mDev->close_output_stream(mDev, stream_out);
-}
-
-void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
- mDev->close_input_stream(mDev, stream_in);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index 5b33592..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- mDevicesFactory = IDevicesFactory::getService();
- if (mDevicesFactory != 0) {
- // It is assumed that DevicesFactory is owned by AudioFlinger
- // and thus has the same lifespan.
- mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
- } else {
- ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
- exit(1);
- }
- // The MSD factory is optional
- mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
- // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-// static
-status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
- if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
- *device = IDevicesFactory::Device::PRIMARY;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
- *device = IDevicesFactory::Device::A2DP;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
- *device = IDevicesFactory::Device::USB;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
- *device = IDevicesFactory::Device::R_SUBMIX;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
- *device = IDevicesFactory::Device::STUB;
- return OK;
- }
- ALOGE("Invalid device name %s", name);
- return BAD_VALUE;
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mDevicesFactory == 0) return NO_INIT;
- IDevicesFactory::Device hidlDevice;
- status_t status = nameFromHal(name, &hidlDevice);
- if (status != OK) return status;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevicesFactory->openDevice(
- hidlDevice,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
- else return NO_INIT;
- }
- return FAILED_TRANSACTION;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
deleted file mode 100644
index 13a9acd..0000000
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <string.h>
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "DevicesFactoryHalLocal.h"
-
-namespace android {
-
-static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
- const hw_module_t *mod;
- int rc;
-
- rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
- if (rc) {
- ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- goto out;
- }
- rc = audio_hw_device_open(mod, dev);
- if (rc) {
- ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- goto out;
- }
- if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
- ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
- rc = BAD_VALUE;
- audio_hw_device_close(*dev);
- goto out;
- }
- return OK;
-
-out:
- *dev = NULL;
- return rc;
-}
-
-status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- audio_hw_device_t *dev;
- status_t rc = load_audio_interface(name, &dev);
- if (rc == OK) {
- *device = new DeviceHalLocal(dev);
- }
- return rc;
-}
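// Editorial usage sketch, hypothetical caller: assuming a factory instance is
// obtained through the usual libaudiohal entry points, opening the primary
// module and checking that the HAL is ready could look like
//     sp<DeviceHalInterface> dev;
//     if (factory->openDevice(AUDIO_HARDWARE_MODULE_ID_PRIMARY, &dev) == OK &&
//             dev->initCheck() == OK) {
//         dev->setMode(AUDIO_MODE_NORMAL);
//     }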
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp b/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
deleted file mode 100644
index 226a500..0000000
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <atomic>
-
-#define LOG_TAG "EffectBufferHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <hidlmemory/mapping.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-
-using ::android::hardware::Return;
-using ::android::hidl::allocator::V1_0::IAllocator;
-
-namespace android {
-
-// static
-uint64_t EffectBufferHalHidl::makeUniqueId() {
- static std::atomic<uint64_t> counter{1};
- return counter++;
-}
-
-status_t EffectBufferHalHidl::allocate(
- size_t size, sp<EffectBufferHalInterface>* buffer) {
- return mirror(nullptr, size, buffer);
-}
-
-status_t EffectBufferHalHidl::mirror(
- void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
- sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
- status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
- if (result == OK) {
- tempBuffer->setExternalData(external);
- *buffer = tempBuffer;
- }
- return result;
-}
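// Editorial usage sketch, hypothetical caller, based only on the interface in
// this file: mirroring an existing buffer so the effects HAL can process it.
//     sp<EffectBufferHalInterface> halBuffer;
//     if (EffectBufferHalHidl::mirror(rawPcm, sizeBytes, &halBuffer) == OK) {
//         halBuffer->update();   // push rawPcm into the shared ashmem region
//         // ... let the HAL process the shared buffer ...
//         halBuffer->commit();   // pull the processed data back into rawPcm
//     }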
-
-EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
- : mBufferSize(size), mFrameCountChanged(false),
- mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
- mHidlBuffer.id = makeUniqueId();
- mHidlBuffer.frameCount = 0;
-}
-
-EffectBufferHalHidl::~EffectBufferHalHidl() {
-}
-
-status_t EffectBufferHalHidl::init() {
- sp<IAllocator> ashmem = IAllocator::getService("ashmem");
- if (ashmem == 0) {
- ALOGE("Failed to retrieve ashmem allocator service");
- return NO_INIT;
- }
- status_t retval = NO_MEMORY;
- Return<void> result = ashmem->allocate(
- mBufferSize,
- [&](bool success, const hidl_memory& memory) {
- if (success) {
- mHidlBuffer.data = memory;
- retval = OK;
- }
- });
- if (result.isOk() && retval == OK) {
- mMemory = hardware::mapMemory(mHidlBuffer.data);
- if (mMemory != 0) {
- mMemory->update();
- mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
- memset(mAudioBuffer.raw, 0, mMemory->getSize());
- mMemory->commit();
- } else {
- ALOGE("Failed to map allocated ashmem");
- retval = NO_MEMORY;
- }
- } else {
- ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
- }
- return result.isOk() ? retval : FAILED_TRANSACTION;
-}
-
-audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
- return &mAudioBuffer;
-}
-
-void* EffectBufferHalHidl::externalData() const {
- return mExternalData;
-}
-
-void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
- mHidlBuffer.frameCount = frameCount;
- mAudioBuffer.frameCount = frameCount;
- mFrameCountChanged = true;
-}
-
-bool EffectBufferHalHidl::checkFrameCountChange() {
- bool result = mFrameCountChanged;
- mFrameCountChanged = false;
- return result;
-}
-
-void EffectBufferHalHidl::setExternalData(void* external) {
- mExternalData = external;
-}
-
-void EffectBufferHalHidl::update() {
- update(mBufferSize);
-}
-
-void EffectBufferHalHidl::commit() {
- commit(mBufferSize);
-}
-
-void EffectBufferHalHidl::update(size_t size) {
- if (mExternalData == nullptr) return;
- mMemory->update();
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mAudioBuffer.raw, mExternalData, size);
- mMemory->commit();
-}
-
-void EffectBufferHalHidl::commit(size_t size) {
- if (mExternalData == nullptr) return;
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mExternalData, mAudioBuffer.raw, size);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectHalHidl.cpp b/media/libaudiohal/2.0/EffectHalHidl.cpp
deleted file mode 100644
index 4fb032c..0000000
--- a/media/libaudiohal/2.0/EffectHalHidl.cpp
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <hwbinder/IPCThreadState.h>
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::effect::V2_0::AudioBuffer;
-using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
- : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
-}
-
-EffectHalHidl::~EffectHalHidl() {
- if (mEffect != 0) {
- close();
- mEffect.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-// static
-void EffectHalHidl::effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
- HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
- HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
- halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
- halDescriptor->cpuLoad = descriptor.cpuLoad;
- halDescriptor->memoryUsage = descriptor.memoryUsage;
- memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
- memcpy(halDescriptor->implementor,
- descriptor.implementor.data(), descriptor.implementor.size());
-}
-
-// TODO(mnaganov): These buffer conversion functions should be shared with the Effect wrapper
-// via HidlUtils. Move them there once hardware/interfaces gets un-frozen again.
-
-// static
-void EffectHalHidl::effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config) {
- config->samplingRateHz = halConfig.samplingRate;
- config->channels = AudioChannelMask(halConfig.channels);
- config->format = AudioFormat(halConfig.format);
- config->accessMode = EffectBufferAccess(halConfig.accessMode);
- config->mask = EffectConfigParameters(halConfig.mask);
-}
-
-// static
-void EffectHalHidl::effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig) {
- halConfig->buffer.frameCount = 0;
- halConfig->buffer.raw = NULL;
- halConfig->samplingRate = config.samplingRateHz;
- halConfig->channels = static_cast<uint32_t>(config.channels);
- halConfig->bufferProvider.cookie = NULL;
- halConfig->bufferProvider.getBuffer = NULL;
- halConfig->bufferProvider.releaseBuffer = NULL;
- halConfig->format = static_cast<uint8_t>(config.format);
- halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
- halConfig->mask = static_cast<uint8_t>(config.mask);
-}
-
-// static
-void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
- effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
- effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
-}
-
-// static
-void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
- effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
- effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
-}
-
-// static
-status_t EffectHalHidl::analyzeResult(const Result& result) {
- switch (result) {
- case Result::OK: return OK;
- case Result::INVALID_ARGUMENTS: return BAD_VALUE;
- case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
- case Result::NOT_INITIALIZED: return NO_INIT;
- case Result::NOT_SUPPORTED: return INVALID_OPERATION;
- case Result::RESULT_TOO_BIG: return NO_MEMORY;
- default: return NO_INIT;
- }
-}
-
-status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (!mBuffersChanged) {
- if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
- mBuffersChanged = buffer.get() != mInBuffer.get();
- } else {
- mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
- }
- }
- mInBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (!mBuffersChanged) {
- if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
- mBuffersChanged = buffer.get() != mOutBuffer.get();
- } else {
- mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
- }
- }
- mOutBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::process() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
-}
-
-status_t EffectHalHidl::processReverse() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
-}
-
-status_t EffectHalHidl::prepareForProcessing() {
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- Return<void> ret = mEffect->prepareForProcessing(
- [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
- retval = r;
- if (retval == Result::OK) {
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
- }
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
- }
- if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for effects is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
- return NO_INIT;
- }
- mStatusMQ = std::move(tempStatusMQ);
- return OK;
-}
-
-bool EffectHalHidl::needToResetBuffers() {
- if (mBuffersChanged) return true;
- bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
- bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
- return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
-}
-
-status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
- if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
- status_t status;
- if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
- return status;
- }
- if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
- return status;
- }
- // The data is already in the buffers; we just need to flush it and wake up the server side.
- std::atomic_thread_fence(std::memory_order_release);
- mEfGroup->wake(mqFlag);
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(
- static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
- Result retval = Result::NOT_INITIALIZED;
- mStatusMQ->read(&retval);
- if (retval == Result::OK || retval == Result::INVALID_STATE) {
- // Sync back the changed contents of the buffer.
- std::atomic_thread_fence(std::memory_order_acquire);
- }
- return analyzeResult(retval);
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t EffectHalHidl::setProcessBuffers() {
- Return<Result> ret = mEffect->setProcessBuffers(
- static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
- static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
- if (ret.isOk() && ret == Result::OK) {
- mBuffersChanged = false;
- return OK;
- }
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData) {
- if (mEffect == 0) return NO_INIT;
-
- // Special cases.
- if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
- return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
- return getConfigImpl(cmdCode, replySize, pReplyData);
- }
-
- // Common case.
- hidl_vec<uint8_t> hidlData;
- if (pCmdData != nullptr && cmdSize > 0) {
- hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
- }
- status_t status;
- uint32_t replySizeStub = 0;
- if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
- Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
- [&](int32_t s, const hidl_vec<uint8_t>& result) {
- status = s;
- if (status == 0) {
- if (*replySize > result.size()) *replySize = result.size();
- if (pReplyData != nullptr && *replySize > 0) {
- memcpy(pReplyData, &result[0], *replySize);
- }
- }
- });
- return ret.isOk() ? status : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
- if (mEffect == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffect->getDescriptor(
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- effectDescriptorToHal(result, pDescriptor);
- }
- });
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::close() {
- if (mEffect == 0) return NO_INIT;
- Return<Result> ret = mEffect->close();
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getConfigImpl(
- uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
- if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- status_t result = FAILED_TRANSACTION;
- Return<void> ret;
- if (cmdCode == EFFECT_CMD_GET_CONFIG) {
- ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- } else {
- ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- }
- if (!ret.isOk()) {
- result = FAILED_TRANSACTION;
- }
- return result;
-}
-
-status_t EffectHalHidl::setConfigImpl(
- uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
- if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
- replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
- if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
- ALOGE("Buffer provider callbacks are not supported");
- }
- EffectConfig hidlConfig;
- effectConfigFromHal(*halConfig, &hidlConfig);
- Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
- mEffect->setConfig(hidlConfig, nullptr, nullptr) :
- mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
- status_t result = FAILED_TRANSACTION;
- if (ret.isOk()) {
- result = analyzeResult(ret);
- *static_cast<int32_t*>(pReplyData) = result;
- }
- return result;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
deleted file mode 100644
index 0d40e6d..0000000
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectsFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <cutils/native_handle.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "EffectsFactoryHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::common::V2_0::HidlUtils;
-using ::android::hardware::audio::common::V2_0::Uuid;
-using ::android::hardware::audio::effect::V2_0::IEffect;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
- mEffectsFactory = IEffectsFactory::getService();
- if (mEffectsFactory == 0) {
- ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
- exit(1);
- }
-}
-
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
-status_t EffectsFactoryHalHidl::queryAllDescriptors() {
- if (mEffectsFactory == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getAllDescriptors(
- [&](Result r, const hidl_vec<EffectDescriptor>& result) {
- retval = r;
- if (retval == Result::OK) {
- mLastDescriptors = result;
- }
- });
- if (ret.isOk()) {
- return retval == Result::OK ? OK : NO_INIT;
- }
- mLastDescriptors.resize(0);
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult == OK) {
- *pNumEffects = mLastDescriptors.size();
- }
- return queryResult;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- uint32_t index, effect_descriptor_t *pDescriptor) {
- // TODO: We need to somehow track the changes on the server side,
- // or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
- if (mLastDescriptors.size() == 0) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult != OK) return queryResult;
- }
- if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
- EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
- return OK;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::createEffect(
- const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect) {
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->createEffect(
- hidlUuid, sessionId, ioId,
- [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
- retval = r;
- if (retval == Result::OK) {
- *effect = new EffectHalHidl(result, effectId);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
- if (mEffectsFactory == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
- return EffectBufferHalHidl::allocate(size, buffer);
-}
-
-status_t EffectsFactoryHalHidl::mirrorBuffer(void* external, size_t size,
- sp<EffectBufferHalInterface>* buffer) {
- return EffectBufferHalHidl::mirror(external, size, buffer);
-}
-
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
deleted file mode 100644
index 9869cd2..0000000
--- a/media/libaudiohal/2.0/StreamHalHidl.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IStreamOutCallback.h>
-#include <hwbinder/IPCThreadState.h>
-#include <mediautils/SchedulingPolicyService.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::audio::common::V2_0::ThreadInfo;
-using ::android::hardware::audio::V2_0::AudioDrain;
-using ::android::hardware::audio::V2_0::IStreamOutCallback;
-using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V2_0::MmapBufferInfo;
-using ::android::hardware::audio::V2_0::MmapPosition;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::audio::V2_0::TimeSpec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
-
-namespace android {
-
-StreamHalHidl::StreamHalHidl(IStream *stream)
- : ConversionHelperHidl("Stream"),
- mStream(stream),
- mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
- mCachedBufferSize(0) {
-
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- mStreamPowerLog.init(sr,
- static_cast<audio_channel_mask_t>(m),
- static_cast<audio_format_t>(f));
- });
- }
-}
-
-StreamHalHidl::~StreamHalHidl() {
- mStream = nullptr;
-}
-
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- if (!mStream) return NO_INIT;
- return processReturn("getSampleRate", mStream->getSampleRate(), rate);
-}
-
-status_t StreamHalHidl::getBufferSize(size_t *size) {
- if (!mStream) return NO_INIT;
- status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
- if (status == OK) {
- mCachedBufferSize = *size;
- }
- return status;
-}
-
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- if (!mStream) return NO_INIT;
- return processReturn("getChannelMask", mStream->getChannelMask(), mask);
-}
-
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
- if (!mStream) return NO_INIT;
- return processReturn("getFormat", mStream->getFormat(), format);
-}
-
-status_t StreamHalHidl::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- if (!mStream) return NO_INIT;
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- *sampleRate = sr;
- *mask = static_cast<audio_channel_mask_t>(m);
- *format = static_cast<audio_format_t>(f);
- });
- return processReturn("getAudioProperties", ret);
-}
-
-status_t StreamHalHidl::setParameters(const String8& kvPairs) {
- if (!mStream) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mStream->setParameters(hidlParams));
-}
-
-status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (!mStream) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mStream->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("addEffect", mStream->addEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("removeEffect", mStream->removeEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::standby() {
- if (!mStream) return NO_INIT;
- return processReturn("standby", mStream->standby());
-}
-
-status_t StreamHalHidl::dump(int fd) {
- if (!mStream) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- mStreamPowerLog.dump(fd);
- return processReturn("dump", ret);
-}
-
-status_t StreamHalHidl::start() {
- if (!mStream) return NO_INIT;
- return processReturn("start", mStream->start());
-}
-
-status_t StreamHalHidl::stop() {
- if (!mStream) return NO_INIT;
- return processReturn("stop", mStream->stop());
-}
-
-status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- Result retval;
- Return<void> ret = mStream->createMmapBuffer(
- minSizeFrames,
- [&](Result r, const MmapBufferInfo& hidlInfo) {
- retval = r;
- if (retval == Result::OK) {
- const native_handle *handle = hidlInfo.sharedMemory.handle();
- if (handle->numFds > 0) {
- info->shared_memory_fd = handle->data[0];
- info->buffer_size_frames = hidlInfo.bufferSizeFrames;
- info->burst_size_frames = hidlInfo.burstSizeFrames;
- // info->shared_memory_address is not needed in HIDL context
- info->shared_memory_address = NULL;
- } else {
- retval = Result::NOT_INITIALIZED;
- }
- }
- });
- return processReturn("createMmapBuffer", ret, retval);
-}
-
-status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
- Result retval;
- Return<void> ret = mStream->getMmapPosition(
- [&](Result r, const MmapPosition& hidlPosition) {
- retval = r;
- if (retval == Result::OK) {
- position->time_nanoseconds = hidlPosition.timeNanoseconds;
- position->position_frames = hidlPosition.positionFrames;
- }
- });
- return processReturn("getMmapPosition", ret, retval);
-}
-
-status_t StreamHalHidl::setHalThreadPriority(int priority) {
- mHalThreadPriority = priority;
- return OK;
-}
-
-status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
- if (mCachedBufferSize != 0) {
- *size = mCachedBufferSize;
- return OK;
- }
- return getBufferSize(size);
-}
-
-bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
- if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
- return true;
- }
- int err = requestPriority(
- threadPid, threadId,
- mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
- ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
- mHalThreadPriority, threadPid, threadId, err);
- // Audio will still work, but latency will be higher and sometimes unacceptable.
- return err == 0;
-}
-
-namespace {
-
-/* Notes on callback ownership.
-
-This is how the (Hw)Binder ownership model works. The server implementation
-is owned by the Binder framework (via sp<>). Proxies are owned by clients.
-When the last proxy disappears, the Binder framework releases the server impl.
-
-Thus, there is no need to keep any references to StreamOutCallback (this is
-the server impl) -- it will live as long as the HAL server holds a strong ref
-to the IStreamOutCallback proxy. We clear that reference by calling
-'clearCallback' from the destructor of StreamOutHalHidl.
-
-The callback only keeps a weak reference to the stream. The stream is owned
-by AudioFlinger.
-
-*/
-
-struct StreamOutCallback : public IStreamOutCallback {
- StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
-
- // IStreamOutCallback implementation
- Return<void> onWriteReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onWriteReady();
- }
- return Void();
- }
-
- Return<void> onDrainReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onDrainReady();
- }
- return Void();
- }
-
- Return<void> onError() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onError();
- }
- return Void();
- }
-
- private:
- wp<StreamOutHalHidl> mStream;
-};
-
-} // namespace
-
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
-}
-
-StreamOutHalHidl::~StreamOutHalHidl() {
- if (mStream != 0) {
- if (mCallback.unsafe_get()) {
- processReturn("clearCallback", mStream->clearCallback());
- }
- processReturn("close", mStream->close());
- mStream.clear();
- }
- mCallback.clear();
- hardware::IPCThreadState::self()->flushCommands();
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamOutHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *latency = writeStatus.reply.latencyMs;
- });
- } else {
- return processReturn("getLatency", mStream->getLatency(), latency);
- }
-}
-
-status_t StreamOutHalHidl::setVolume(float left, float right) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setVolume", mStream->setVolume(left, right));
-}
-
-status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
- if (mStream == 0) return NO_INIT;
- *written = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
- ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
- return OK;
- }
-
- status_t status;
- if (!mDataMQ) {
- // If playback starts close to the end of a compressed track, the number of bytes
- // that needs to be written is less than the actual buffer size. We still need to
- // use the full buffer size for the MQ, since otherwise data will be truncated
- // after seeking back to the middle of the track.
- size_t bufferSize;
- if ((status = getCachedBufferSize(&bufferSize)) != OK) {
- return status;
- }
- if (bytes > bufferSize) bufferSize = bytes;
- if ((status = prepareForWriting(bufferSize)) != OK) {
- return status;
- }
- }
-
- status = callWriterThread(
- WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
- [&] (const WriteStatus& writeStatus) {
- *written = writeStatus.reply.written;
- // Diagnostics of the cause of b/35813113.
- ALOGE_IF(*written > bytes,
- "hal reports more bytes written than asked for: %lld > %lld",
- (long long)*written, (long long)bytes);
- });
- mStreamPowerLog.log(buffer, *written);
- return status;
-}
-
-status_t StreamOutHalHidl::callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
- if (!mCommandMQ->write(&cmd)) {
- ALOGE("command message queue write failed for \"%s\"", cmdName);
- return -EAGAIN;
- }
- if (data != nullptr) {
- size_t availableToWrite = mDataMQ->availableToWrite();
- if (dataSize > availableToWrite) {
- ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
- (long long)dataSize, (long long)availableToWrite);
- dataSize = availableToWrite;
- }
- if (!mDataMQ->write(data, dataSize)) {
- ALOGE("data message queue write failed for \"%s\"", cmdName);
- }
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
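- // Wait for the HAL writer thread to signal NOT_FULL, which indicates that a status
- // entry for the submitted command is available in the status queue.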
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
- WriteStatus writeStatus;
- writeStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&writeStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (writeStatus.retval == Result::OK) {
- ret = OK;
- callback(writeStatus);
- } else {
- ret = processReturn(cmdName, writeStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
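- // The HAL allocates the command, data, and status queues and returns their descriptors;
- // local MQ wrappers are created from them, and the data queue's event flag word is
- // reused for cross-process signaling.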
- Return<void> ret = mStream->prepareForWriting(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForWriting", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for writing is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for writing is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mWriterClient = gettid();
- return OK;
-}
-
-status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getRenderPosition(
- [&](Result r, uint32_t d) {
- retval = r;
- if (retval == Result::OK) {
- *dspFrames = d;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getNextWriteTimestamp(
- [&](Result r, int64_t t) {
- retval = r;
- if (retval == Result::OK) {
- *timestamp = t;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream == 0) return NO_INIT;
- status_t status = processReturn(
- "setCallback", mStream->setCallback(new StreamOutCallback(this)));
- if (status == OK) {
- mCallback = callback;
- }
- return status;
-}
-
-status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- if (mStream == 0) return NO_INIT;
- Return<void> ret = mStream->supportsPauseAndResume(
- [&](bool p, bool r) {
- *supportsPause = p;
- *supportsResume = r;
- });
- return processReturn("supportsPauseAndResume", ret);
-}
-
-status_t StreamOutHalHidl::pause() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->pause());
-}
-
-status_t StreamOutHalHidl::resume() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->resume());
-}
-
-status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
-}
-
-status_t StreamOutHalHidl::drain(bool earlyNotify) {
- if (mStream == 0) return NO_INIT;
- return processReturn(
- "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
-}
-
-status_t StreamOutHalHidl::flush() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->flush());
-}
-
-status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *frames = writeStatus.reply.presentationPosition.frames;
- timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
- timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getPresentationPosition(
- [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- timestamp->tv_sec = hidlTimeStamp.tvSec;
- timestamp->tv_nsec = hidlTimeStamp.tvNSec;
- }
- });
- return processReturn("getPresentationPosition", ret, retval);
- }
-}
-
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
- // Audio HAL V2.0 does not support propagating source metadata
- return INVALID_OPERATION;
-}
-
-void StreamOutHalHidl::onWriteReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onWriteReady");
- callback->onWriteReady();
-}
-
-void StreamOutHalHidl::onDrainReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onDrainReady");
- callback->onDrainReady();
-}
-
-void StreamOutHalHidl::onError() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onError");
- callback->onError();
-}
-
-
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
-}
-
-StreamInHalHidl::~StreamInHalHidl() {
- if (mStream != 0) {
- processReturn("close", mStream->close());
- mStream.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamInHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamInHalHidl::setGain(float gain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setGain", mStream->setGain(gain));
-}
-
-status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
- if (mStream == 0) return NO_INIT;
- *read = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
- return OK;
- }
-
- status_t status;
- if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
- return status;
- }
-
- ReadParameters params;
- params.command = ReadCommand::READ;
- params.params.read = bytes;
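- // The HAL reader thread fills the data queue; copy out at most what is available
- // and report the byte count from the HAL's read status.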
- status = callReaderThread(params, "read",
- [&](const ReadStatus& readStatus) {
- const size_t availToRead = mDataMQ->availableToRead();
- if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
- ALOGE("data message queue read failed for \"read\"");
- }
- ALOGW_IF(availToRead != readStatus.reply.read,
- "HAL read report inconsistent: mq = %d, status = %d",
- (int32_t)availToRead, (int32_t)readStatus.reply.read);
- *read = readStatus.reply.read;
- });
- mStreamPowerLog.log(buffer, *read);
- return status;
-}
-
-status_t StreamInHalHidl::callReaderThread(
- const ReadParameters& params, const char* cmdName,
- StreamInHalHidl::ReaderCallback callback) {
- if (!mCommandMQ->write(&params)) {
- ALOGW("command message queue write failed");
- return -EAGAIN;
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
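- // Wait for the HAL reader thread to signal NOT_EMPTY, which indicates that a read
- // status entry is available in the status queue.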
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
- ReadStatus readStatus;
- readStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&readStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (readStatus.retval == Result::OK) {
- ret = OK;
- callback(readStatus);
- } else {
- ret = processReturn(cmdName, readStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
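- // As for writing, the HAL allocates the command, data, and status queues; local MQ
- // wrappers are created from the returned descriptors, and the data queue's event
- // flag word is used for signaling.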
- Return<void> ret = mStream->prepareForReading(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForReading", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for reading");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for reading is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for reading is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mReaderClient = gettid();
- return OK;
-}
-
-status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
-}
-
-status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream == 0) return NO_INIT;
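- // When called from the thread that performs reads (and the queues are set up), route
- // the query through the HAL reader thread; other callers use a direct HIDL call.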
- if (mReaderClient == gettid() && mCommandMQ) {
- ReadParameters params;
- params.command = ReadCommand::GET_CAPTURE_POSITION;
- return callReaderThread(params, "getCapturePosition",
- [&](const ReadStatus& readStatus) {
- *frames = readStatus.reply.capturePosition.frames;
- *time = readStatus.reply.capturePosition.time;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getCapturePosition(
- [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- *time = hidlTime;
- }
- });
- return processReturn("getCapturePosition", ret, retval);
- }
-}
-
-status_t StreamInHalHidl::getActiveMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- if (mStream == 0) return NO_INIT;
- return INVALID_OPERATION;
-}
-
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
- // Audio HAL V2.0 does not support propagating sink metadata
- return INVALID_OPERATION;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
deleted file mode 100644
index 98107e5..0000000
--- a/media/libaudiohal/2.0/StreamHalLocal.cpp
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
- : mDevice(device),
- mStream(stream) {
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- mStreamPowerLog.init(mStream->get_sample_rate(mStream),
- mStream->get_channels(mStream),
- mStream->get_format(mStream));
- }
-}
-
-StreamHalLocal::~StreamHalLocal() {
- mStream = 0;
- mDevice.clear();
-}
-
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
- *rate = mStream->get_sample_rate(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getBufferSize(size_t *size) {
- *size = mStream->get_buffer_size(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
- *mask = mStream->get_channels(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- *sampleRate = mStream->get_sample_rate(mStream);
- *mask = mStream->get_channels(mStream);
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::setParameters(const String8& kvPairs) {
- return mStream->set_parameters(mStream, kvPairs.string());
-}
-
-status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mStream->get_parameters(mStream, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
- LOG_ALWAYS_FATAL("Local streams can not have effects");
- return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
- LOG_ALWAYS_FATAL("Local streams can not have effects");
- return INVALID_OPERATION;
-}
-
-status_t StreamHalLocal::standby() {
- return mStream->standby(mStream);
-}
-
-status_t StreamHalLocal::dump(int fd) {
- status_t status = mStream->dump(mStream, fd);
- mStreamPowerLog.dump(fd);
- return status;
-}
-
-status_t StreamHalLocal::setHalThreadPriority(int) {
- // Nothing to do: the local HAL is executed by AudioFlinger directly
- // on the same thread.
- return OK;
-}
-
-StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamOutHalLocal::~StreamOutHalLocal() {
- mCallback.clear();
- mDevice->closeOutputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamOutHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_out_frame_size(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
- *latency = mStream->get_latency(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::setVolume(float left, float right) {
- if (mStream->set_volume == NULL) return INVALID_OPERATION;
- return mStream->set_volume(mStream, left, right);
-}
-
-status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
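- // The legacy HAL write() returns the number of bytes written or a negative status
- // code, which is passed straight back to the caller.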
- ssize_t writeResult = mStream->write(mStream, buffer, bytes);
- if (writeResult > 0) {
- *written = writeResult;
- mStreamPowerLog.log(buffer, *written);
- return OK;
- } else {
- *written = 0;
- return writeResult;
- }
-}
-
-status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
- return mStream->get_render_position(mStream, dspFrames);
-}
-
-status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
- return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
-status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream->set_callback == NULL) return INVALID_OPERATION;
- status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
- if (result == OK) {
- mCallback = callback;
- }
- return result;
-}
-
-// static
-int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
- // We act as if we had given a wp<StreamOutHalLocal> to the HAL. This way we correctly
- // handle the case where the callback is invoked while StreamOutHalLocal's destructor is
- // already running, because the destructor is invoked only after the refcount has been
- // atomically decremented.
- wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
- sp<StreamOutHalLocal> self = weakSelf.promote();
- if (self == 0) return 0;
- sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
- if (callback == 0) return 0;
- ALOGV("asyncCallback() event %d", event);
- switch (event) {
- case STREAM_CBK_EVENT_WRITE_READY:
- callback->onWriteReady();
- break;
- case STREAM_CBK_EVENT_DRAIN_READY:
- callback->onDrainReady();
- break;
- case STREAM_CBK_EVENT_ERROR:
- callback->onError();
- break;
- default:
- ALOGW("asyncCallback() unknown event %d", event);
- break;
- }
- return 0;
-}
-
-status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- *supportsPause = mStream->pause != NULL;
- *supportsResume = mStream->resume != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::pause() {
- if (mStream->pause == NULL) return INVALID_OPERATION;
- return mStream->pause(mStream);
-}
-
-status_t StreamOutHalLocal::resume() {
- if (mStream->resume == NULL) return INVALID_OPERATION;
- return mStream->resume(mStream);
-}
-
-status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
- *supportsDrain = mStream->drain != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::drain(bool earlyNotify) {
- if (mStream->drain == NULL) return INVALID_OPERATION;
- return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
-}
-
-status_t StreamOutHalLocal::flush() {
- if (mStream->flush == NULL) return INVALID_OPERATION;
- return mStream->flush(mStream);
-}
-
-status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
- return mStream->get_presentation_position(mStream, frames, timestamp);
-}
-
-status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
- if (mStream->update_source_metadata == nullptr) {
- return INVALID_OPERATION;
- }
- const source_metadata_t metadata {
- .track_count = sourceMetadata.tracks.size(),
- // const cast is fine as it is in a const structure
- .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
- };
- mStream->update_source_metadata(mStream, &metadata);
- return OK;
-}
-
-status_t StreamOutHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamOutHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamInHalLocal::~StreamInHalLocal() {
- mDevice->closeInputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamInHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_in_frame_size(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::setGain(float gain) {
- return mStream->set_gain(mStream, gain);
-}
-
-status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
- ssize_t readResult = mStream->read(mStream, buffer, bytes);
- if (readResult > 0) {
- *read = readResult;
- mStreamPowerLog.log( buffer, *read);
- return OK;
- } else {
- *read = 0;
- return readResult;
- }
-}
-
-status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
- *framesLost = mStream->get_input_frames_lost(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
- return mStream->get_capture_position(mStream, frames, time);
-}
-
-status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
- if (mStream->update_sink_metadata == nullptr) {
- return INVALID_OPERATION;
- }
- const sink_metadata_t metadata {
- .track_count = sinkMetadata.tracks.size(),
- // const cast is fine as it is in a const structure
- .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
- };
- mStream->update_sink_metadata(mStream, &metadata);
- return OK;
-}
-
-status_t StreamInHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamInHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-status_t StreamInHalLocal::getActiveMicrophones(
- std::vector<media::MicrophoneInfo> *microphones __unused) {
- return INVALID_OPERATION;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
deleted file mode 100644
index 8823a8d..0000000
--- a/media/libaudiohal/4.0/ConversionHelperHidl.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/types.h>
-#include <hidl/HidlSupport.h>
-#include <system/audio.h>
-#include <utils/String8.h>
-
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::Return;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-namespace V4_0 {
-
-class ConversionHelperHidl {
- protected:
- static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
- static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
- static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
- static void microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst);
-
- ConversionHelperHidl(const char* className);
-
- template<typename R, typename T>
- status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
- if (ret.isOk()) {
- // This way it also works for enum class to unscoped enum conversion.
- *retval = static_cast<T>(static_cast<R>(ret));
- return OK;
- }
- return processReturn(funcName, ret);
- }
-
- template<typename T>
- status_t processReturn(const char* funcName, const Return<T>& ret) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? OK : FAILED_TRANSACTION;
- }
-
- status_t processReturn(const char* funcName, const Return<hardware::audio::V4_0::Result>& ret) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
- }
-
- template<typename T>
- status_t processReturn(
- const char* funcName, const Return<T>& ret, hardware::audio::V4_0::Result retval) {
- if (!ret.isOk()) {
- emitError(funcName, ret.description().c_str());
- }
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
- }
-
- private:
- const char* mClassName;
-
- static status_t analyzeResult(const hardware::audio::V4_0::Result& result);
-
- void emitError(const char* funcName, const char* description);
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
deleted file mode 100644
index 0bd2175..0000000
--- a/media/libaudiohal/4.0/DeviceHalHidl.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <android/hardware/audio/4.0/IPrimaryDevice.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- // List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- virtual status_t dump(int fd);
-
- private:
- friend class DevicesFactoryHalHidl;
- sp<IDevice> mDevice;
- sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
-
- // Can not be constructed directly by clients.
- explicit DeviceHalHidl(const sp<IDevice>& device);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
deleted file mode 100644
index 08341a4..0000000
--- a/media/libaudiohal/4.0/DeviceHalLocal.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
-
-#include <hardware/audio.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal : public DeviceHalInterface
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- // List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- virtual status_t dump(int fd);
-
- void closeOutputStream(struct audio_stream_out *stream_out);
- void closeInputStream(struct audio_stream_in *stream_in);
-
- private:
- audio_hw_device_t *mDev;
-
- friend class DevicesFactoryHalLocal;
-
- // Can not be constructed directly by clients.
- explicit DeviceHalLocal(audio_hw_device_t *dev);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalLocal();
-
- uint32_t version() const { return mDev->common.version; }
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index c83194e..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/4.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevice;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- mDevicesFactory = IDevicesFactory::getService();
- if (mDevicesFactory != 0) {
- // It is assumed that DevicesFactory is owned by AudioFlinger
- // and thus has the same lifespan.
- mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
- } else {
- ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
- exit(1);
- }
- // The MSD factory is optional
- mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
- // TODO: Register death handler, and add 'restart' directive to audioserver.rc
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mDevicesFactory == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevicesFactory->openDevice(
- name,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
- else return NO_INIT;
- }
- return FAILED_TRANSACTION;
-}
-
-} // namespace V4_0
-} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h b/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
deleted file mode 100644
index 114889b..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalHidl.h"
-
-using ::android::hardware::audio::V4_0::IDevicesFactory;
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalHybrid;
-
- sp<IDevicesFactory> mDevicesFactory;
- sp<IDevicesFactory> mDevicesFactoryMsd;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHidl();
-
- virtual ~DevicesFactoryHalHidl();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
deleted file mode 100644
index 7ff1ec7d..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalHybrid"
-//#define LOG_NDEBUG 0
-
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
-#include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
- : mLocalFactory(new DevicesFactoryHalLocal()),
- mHidlFactory(new DevicesFactoryHalHidl()) {
-}
-
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
-status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
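- // A2DP and hearing aid modules are always opened through the local (legacy) factory;
- // everything else goes through the HIDL factory when it is available.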
- if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
- strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
- return mHidlFactory->openDevice(name, device);
- }
- return mLocalFactory->openDevice(name, device);
-}
-
-} // namespace V4_0
-} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h b/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
deleted file mode 100644
index bc1c521..0000000
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalLocal.h"
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalHybrid;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalLocal() {}
-
- virtual ~DevicesFactoryHalLocal() {}
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.h b/media/libaudiohal/4.0/EffectBufferHalHidl.h
deleted file mode 100644
index 6d578c6..0000000
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidl/HidlSupport.h>
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-using android::hardware::audio::effect::V4_0::AudioBuffer;
-using android::hardware::hidl_memory;
-using android::hidl::memory::V1_0::IMemory;
-
-namespace android {
-namespace V4_0 {
-
-class EffectBufferHalHidl : public EffectBufferHalInterface
-{
- public:
- static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
- static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
-
- virtual audio_buffer_t* audioBuffer();
- virtual void* externalData() const;
-
- virtual size_t getSize() const override { return mBufferSize; }
-
- virtual void setExternalData(void* external);
- virtual void setFrameCount(size_t frameCount);
- virtual bool checkFrameCountChange();
-
- virtual void update();
- virtual void commit();
- virtual void update(size_t size);
- virtual void commit(size_t size);
-
- const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
-
- private:
- friend class EffectBufferHalInterface;
-
- static uint64_t makeUniqueId();
-
- const size_t mBufferSize;
- bool mFrameCountChanged;
- void* mExternalData;
- AudioBuffer mHidlBuffer;
- sp<IMemory> mMemory;
- audio_buffer_t mAudioBuffer;
-
- // Can not be constructed directly by clients.
- explicit EffectBufferHalHidl(size_t size);
-
- virtual ~EffectBufferHalHidl();
-
- status_t init();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.h b/media/libaudiohal/4.0/EffectHalHidl.h
deleted file mode 100644
index 5a4dab1..0000000
--- a/media/libaudiohal/4.0/EffectHalHidl.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffect.h>
-#include <media/audiohal/EffectHalInterface.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <system/audio_effect.h>
-
-using ::android::hardware::audio::effect::V4_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V4_0::EffectConfig;
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-
-namespace android {
-namespace V4_0 {
-
-class EffectHalHidl : public EffectHalInterface
-{
- public:
- // Set the input buffer.
- virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Set the output buffer.
- virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Effect process function.
- virtual status_t process();
-
- // Process reverse stream function. This function is used to pass
- // a reference stream to the effect engine.
- virtual status_t processReverse();
-
- // Send a command and receive a response to/from effect engine.
- virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
-
- // Returns the effect descriptor.
- virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
-
- // Free resources on the remote side.
- virtual status_t close();
-
- // Whether it's a local implementation.
- virtual bool isLocal() const { return false; }
-
- uint64_t effectId() const { return mEffectId; }
-
- static void effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
-
- private:
- friend class EffectsFactoryHalHidl;
- typedef MessageQueue<
- hardware::audio::effect::V4_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
-
- sp<IEffect> mEffect;
- const uint64_t mEffectId;
- sp<EffectBufferHalInterface> mInBuffer;
- sp<EffectBufferHalInterface> mOutBuffer;
- bool mBuffersChanged;
- std::unique_ptr<StatusMQ> mStatusMQ;
- EventFlag* mEfGroup;
-
- static status_t analyzeResult(const hardware::audio::effect::V4_0::Result& result);
- static void effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config);
- static void effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig);
- static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
- static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
-
- // Can not be constructed directly by clients.
- EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
-
- // The destructor automatically releases the effect.
- virtual ~EffectHalHidl();
-
- status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
- status_t prepareForProcessing();
- bool needToResetBuffers();
- status_t processImpl(uint32_t mqFlag);
- status_t setConfigImpl(
- uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
- status_t setProcessBuffers();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
deleted file mode 100644
index 2dda0f8..0000000
--- a/media/libaudiohal/4.0/StreamHalHidl.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
-
-#include <atomic>
-
-#include <android/hardware/audio/4.0/IStream.h>
-#include <android/hardware/audio/4.0/IStreamIn.h>
-#include <android/hardware/audio/4.0/IStreamOut.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <media/audiohal/StreamHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-#include "StreamPowerLog.h"
-
-using ::android::hardware::audio::V4_0::IStream;
-using ::android::hardware::audio::V4_0::IStreamIn;
-using ::android::hardware::audio::V4_0::IStreamOut;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V4_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V4_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V4_0::IStreamOut::WriteStatus;
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalHidl;
-
-class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
-{
- public:
- // Return the sampling rate in Hz, e.g. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return the size of the input/output buffer in bytes for this stream, e.g. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- explicit StreamHalHidl(IStream *stream);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalHidl();
-
- status_t getCachedBufferSize(size_t *size);
-
- bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- const int HAL_THREAD_PRIORITY_DEFAULT = -1;
- IStream *mStream;
- int mHalThreadPriority;
- size_t mCachedBufferSize;
-};
-
-class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies to the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies to the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Called when the metadata of the stream's source has been changed.
- status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
- // Methods used by StreamOutCallback (HIDL).
- void onWriteReady();
- void onDrainReady();
- void onError();
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- wp<StreamOutHalInterfaceCallback> mCallback;
- sp<IStreamOut> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mWriterClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamOutHalHidl(const sp<IStreamOut>& stream);
-
- virtual ~StreamOutHalHidl();
-
- using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
- status_t callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, WriterCallback callback);
- status_t prepareForWriting(size_t bufferSize);
-};
-
-class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- // Get active microphones
- virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- // Called when the metadata of the stream's sink has been changed.
- status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- sp<IStreamIn> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mReaderClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamInHalHidl(const sp<IStreamIn>& stream);
-
- virtual ~StreamInHalHidl();
-
- using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
- status_t callReaderThread(
- const ReadParameters& params, const char* cmdName, ReaderCallback callback);
- status_t prepareForReading(size_t bufferSize);
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
deleted file mode 100644
index 7237509..0000000
--- a/media/libaudiohal/4.0/StreamHalLocal.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
-
-#include <media/audiohal/StreamHalInterface.h>
-#include "StreamPowerLog.h"
-
-namespace android {
-namespace V4_0 {
-
-class DeviceHalLocal;
-
-class StreamHalLocal : public virtual StreamHalInterface
-{
- public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return size of input/output buffer in bytes for this stream - eg. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start() = 0;
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop() = 0;
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) = 0;
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalLocal();
-
- sp<DeviceHalLocal> mDevice;
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- audio_stream_t *mStream;
-};
-
-class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies to the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies to the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Called when the metadata of the stream's source has been changed.
- status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
-
- private:
- audio_stream_out_t *mStream;
- wp<StreamOutHalInterfaceCallback> mCallback;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamOutHalLocal();
-
- static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
-};
-
-class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Get active microphones
- virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
-
- // Called when the metadata of the stream's sink has been changed.
- status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
-
- private:
- audio_stream_in_t *mStream;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamInHalLocal();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamPowerLog.h b/media/libaudiohal/4.0/StreamPowerLog.h
deleted file mode 100644
index 57b7201..0000000
--- a/media/libaudiohal/4.0/StreamPowerLog.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-#define ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
-
-#include <audio_utils/clock.h>
-#include <audio_utils/PowerLog.h>
-#include <cutils/properties.h>
-#include <system/audio.h>
-
-namespace android {
-namespace V4_0 {
-
-class StreamPowerLog {
-public:
- StreamPowerLog() :
- mIsUserDebugOrEngBuild(is_userdebug_or_eng_build()),
- mPowerLog(nullptr),
- mFrameSize(0) {
- // use init() to set up the power log.
- }
-
- ~StreamPowerLog() {
- power_log_destroy(mPowerLog); // OK for null mPowerLog
- mPowerLog = nullptr;
- }
-
- // A one-time initialization (do not call twice) before using StreamPowerLog.
- void init(uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format) {
- if (mPowerLog == nullptr) {
- // Note: A way to get channel count for both input and output channel masks
- // but does not check validity of the channel mask.
- const uint32_t channelCount = popcount(audio_channel_mask_get_bits(channelMask));
- mFrameSize = channelCount * audio_bytes_per_sample(format);
- if (mFrameSize > 0) {
- const size_t kPowerLogFramesPerEntry =
- (long long)sampleRate * kPowerLogSamplingIntervalMs / 1000;
- mPowerLog = power_log_create(
- sampleRate,
- channelCount,
- format,
- kPowerLogEntries,
- kPowerLogFramesPerEntry);
- }
- }
- // mPowerLog may be NULL (not the right build, format not accepted, etc.).
- }
-
- // Dump the power log to fd.
- void dump(int fd) const {
- // OK for null mPowerLog
- (void)power_log_dump(
- mPowerLog, fd, " " /* prefix */, kPowerLogLines, 0 /* limit_ns */);
- }
-
- // Log the audio data contained in buffer.
- void log(const void *buffer, size_t sizeInBytes) const {
- if (mPowerLog != nullptr) { // mFrameSize is always nonzero if mPowerLog exists.
- power_log_log(
- mPowerLog, buffer, sizeInBytes / mFrameSize, audio_utils_get_real_time_ns());
- }
- }
-
- bool isUserDebugOrEngBuild() const {
- return mIsUserDebugOrEngBuild;
- }
-
-private:
-
- static inline bool is_userdebug_or_eng_build() {
- char value[PROPERTY_VALUE_MAX];
- (void)property_get("ro.build.type", value, "unknown"); // ignore actual length
- return strcmp(value, "userdebug") == 0 || strcmp(value, "eng") == 0;
- }
-
- // Audio signal power log configuration.
- static const size_t kPowerLogLines = 40;
- static const size_t kPowerLogSamplingIntervalMs = 50;
- static const size_t kPowerLogEntries = (1 /* minutes */ * 60 /* seconds */ * 1000 /* msec */
- / kPowerLogSamplingIntervalMs);
-
- const bool mIsUserDebugOrEngBuild;
- power_log_t *mPowerLog;
- size_t mFrameSize;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
deleted file mode 100644
index abf6de0..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
-
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-namespace V4_0 {
-
-class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHybrid();
-
- virtual ~DevicesFactoryHalHybrid();
-
- sp<DevicesFactoryHalInterface> mLocalFactory;
- sp<DevicesFactoryHalInterface> mHidlFactory;
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
deleted file mode 100644
index 680b7a1..0000000
--- a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
-
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/types.h>
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-namespace android {
-namespace V4_0 {
-
-using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V4_0::IEffectsFactory;
-using ::android::hardware::hidl_vec;
-
-class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
-{
- public:
- // Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
- // Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
-
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
-
- // Creates an effect engine of the specified type.
- // To release the effect engine, it is necessary to release references
- // to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect);
-
- virtual status_t dumpEffects(int fd);
-
- status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
- status_t mirrorBuffer(void* external, size_t size,
- sp<EffectBufferHalInterface>* buffer) override;
-
- private:
- friend class EffectsFactoryHalInterface;
-
- sp<IEffectsFactory> mEffectsFactory;
- hidl_vec<EffectDescriptor> mLastDescriptors;
-
- // Can not be constructed directly by clients.
- EffectsFactoryHalHidl();
- virtual ~EffectsFactoryHalHidl();
-
- status_t queryAllDescriptors();
-};
-
-} // namespace V4_0
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 3a5df27..0ff0d4a 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -8,6 +8,7 @@
cflags: [
"-Wall",
+ "-Wextra",
"-Werror",
],
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index 4c8eaf6..e631ace 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -17,18 +17,17 @@
#include <android/hardware/audio/2.0/IDevicesFactory.h>
#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <DevicesFactoryHalHybrid.h>
-#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
+#include <libaudiohal/FactoryHalHidl.h>
namespace android {
// static
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
- return new V4_0::DevicesFactoryHalHybrid();
+ return V4_0::createDevicesFactoryHal();
}
if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
- return new DevicesFactoryHalHybrid();
+ return V2_0::createDevicesFactoryHal();
}
return nullptr;
}
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index ead1fa2..f7734a8 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -17,19 +17,17 @@
#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <EffectsFactoryHalHidl.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
-
+#include <libaudiohal/FactoryHalHidl.h>
namespace android {
// static
sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
- return new V4_0::EffectsFactoryHalHidl();
+ return V4_0::createEffectsFactoryHal();
}
if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
- return new EffectsFactoryHalHidl();
+ return V2_0::createEffectsFactoryHal();
}
return nullptr;
}
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/impl/Android.bp
similarity index 68%
rename from media/libaudiohal/4.0/Android.bp
rename to media/libaudiohal/impl/Android.bp
index 833defa..3827336 100644
--- a/media/libaudiohal/4.0/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -1,5 +1,5 @@
-cc_library_shared {
- name: "libaudiohal@4.0",
+cc_defaults {
+ name: "libaudiohal_default",
srcs: [
"DeviceHalLocal.cpp",
@@ -24,28 +24,30 @@
"-Werror",
],
shared_libs: [
+ "android.hardware.audio.common-util",
+ "android.hardware.audio.common@2.0",
+ "android.hardware.audio.common@4.0",
+ "android.hardware.audio.effect@2.0",
+ "android.hardware.audio.effect@4.0",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio@4.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
"libaudiohal_deathhandler",
"libaudioutils",
+ "libbase",
"libbinder",
"libcutils",
- "liblog",
- "libutils",
- "libhardware",
- "libbase",
"libfmq",
- "libhwbinder",
+ "libhardware",
"libhidlbase",
"libhidlmemory",
"libhidltransport",
- "android.hardware.audio@4.0",
- "android.hardware.audio.common-util",
- "android.hardware.audio.common@4.0",
- "android.hardware.audio.common@4.0-util",
- "android.hardware.audio.effect@4.0",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
+ "libhwbinder",
+ "liblog",
"libmedia_helper",
"libmediautils",
+ "libutils",
],
header_libs: [
"android.hardware.audio.common.util@all-versions",
@@ -56,3 +58,29 @@
"libfmq",
],
}
+
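+// Both versioned libraries share libaudiohal_default; each one selects its HAL API level
+// through the MAJOR_VERSION/MINOR_VERSION macros pulled in via VersionMacro.h.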
+cc_library_shared {
+ name: "libaudiohal@2.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@2.0-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=2",
+ "-DMINOR_VERSION=0",
+ "-include VersionMacro.h",
+ ]
+}
+
+cc_library_shared {
+ name: "libaudiohal@4.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@4.0-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=4",
+ "-DMINOR_VERSION=0",
+ "-include VersionMacro.h",
+ ]
+}
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
similarity index 90%
rename from media/libaudiohal/4.0/ConversionHelperHidl.cpp
rename to media/libaudiohal/impl/ConversionHelperHidl.cpp
index fe27504..5d12fad 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -22,15 +22,18 @@
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
-using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
-using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::Result;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::CPP_VERSION::AudioMicrophoneLocation;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+#endif
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
// static
status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
@@ -106,8 +109,9 @@
ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
}
+#if MAJOR_VERSION == 4
// TODO: Use the same implementation in the hal when it moves to a util library.
-std::string deviceAddressToHal(const DeviceAddress& address) {
+static std::string deviceAddressToHal(const DeviceAddress& address) {
// HAL assumes that the address is NUL-terminated.
char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
memset(halAddress, 0, sizeof(halAddress));
@@ -141,7 +145,7 @@
//local conversion helpers
-audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+static audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
switch (mapping) {
case AudioMicrophoneChannelMapping::UNUSED:
return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
@@ -154,7 +158,7 @@
}
}
-audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+static audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
switch (location) {
case AudioMicrophoneLocation::UNKNOWN:
return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
@@ -168,7 +172,7 @@
LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
}
}
-audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+static audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
switch (dir) {
case AudioMicrophoneDirectionality::UNKNOWN:
return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
@@ -187,9 +191,8 @@
}
}
-// static
-void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst) {
+void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst) {
if (pDst != NULL) {
snprintf(pDst->device_id, sizeof(pDst->device_id),
"%s", src.deviceId.c_str());
@@ -232,6 +235,7 @@
pDst->orientation.z = src.orientation.z;
}
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
similarity index 78%
rename from media/libaudiohal/2.0/ConversionHelperHidl.h
rename to media/libaudiohal/impl/ConversionHelperHidl.h
index c356f37..1a9319f 100644
--- a/media/libaudiohal/2.0/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -18,15 +18,20 @@
#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
#include <android/hardware/audio/2.0/types.h>
+#include <android/hardware/audio/4.0/types.h>
#include <hidl/HidlSupport.h>
+#include <system/audio.h>
#include <utils/String8.h>
-using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
+
using ::android::hardware::Return;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
namespace android {
+namespace CPP_VERSION {
class ConversionHelperHidl {
protected:
@@ -54,7 +59,7 @@
return ret.isOk() ? OK : FAILED_TRANSACTION;
}
- status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
+ status_t processReturn(const char* funcName, const Return<CoreResult>& ret) {
if (!ret.isOk()) {
emitError(funcName, ret.description().c_str());
}
@@ -63,7 +68,7 @@
template<typename T>
status_t processReturn(
- const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
+ const char* funcName, const Return<T>& ret, CoreResult retval) {
if (!ret.isOk()) {
emitError(funcName, ret.description().c_str());
}
@@ -73,11 +78,18 @@
private:
const char* mClassName;
- static status_t analyzeResult(const hardware::audio::V2_0::Result& result);
+ static status_t analyzeResult(const CoreResult& result);
void emitError(const char* funcName, const char* description);
};
+#if MAJOR_VERSION == 4
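+// MicrophoneInfo and its conversion helper only exist starting with the 4.0 HAL.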
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst);
+#endif
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
similarity index 88%
rename from media/libaudiohal/4.0/DeviceHalHidl.cpp
rename to media/libaudiohal/impl/DeviceHalHidl.cpp
index 6facca9..723e2eb 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -19,6 +19,7 @@
#define LOG_TAG "DeviceHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
#include <android/hardware/audio/4.0/IPrimaryDevice.h>
#include <cutils/native_handle.h>
#include <hwbinder/IPCThreadState.h>
@@ -31,27 +32,30 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::V4_0::AudioConfig;
-using ::android::hardware::audio::common::V4_0::AudioDevice;
-using ::android::hardware::audio::common::V4_0::AudioInputFlag;
-using ::android::hardware::audio::common::V4_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V4_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V4_0::AudioPort;
-using ::android::hardware::audio::common::V4_0::AudioPortConfig;
-using ::android::hardware::audio::common::V4_0::AudioMode;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioDevice;
+using ::android::hardware::audio::common::CPP_VERSION::AudioInputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioOutputFlag;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPatchHandle;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
+using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
+using ::android::hardware::audio::common::CPP_VERSION::AudioMode;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
using ::android::hardware::audio::common::utils::mkEnumConverter;
-using ::android::hardware::audio::V4_0::DeviceAddress;
-using ::android::hardware::audio::V4_0::IPrimaryDevice;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::SinkMetadata;
+using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::CPP_VERSION::SinkMetadata;
+#endif
+
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
namespace {
@@ -259,7 +263,9 @@
hidlDevice,
hidlConfig,
mkEnumConverter<AudioOutputFlag>(flags),
+#if MAJOR_VERSION == 4
{} /* metadata */,
+#endif
[&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
@@ -285,15 +291,19 @@
AudioConfig hidlConfig;
HidlUtils::audioConfigFromHal(*config, &hidlConfig);
Result retval = Result::NOT_INITIALIZED;
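+ // The 2.0 HAL takes a bare AudioSource, while 4.0 wraps the source in SinkMetadata.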
+#if MAJOR_VERSION == 2
+ auto sourceMetadata = AudioSource(source);
+#elif MAJOR_VERSION == 4
// TODO: correctly propagate the tracks sources and volume
// for now, only send the main source at 1dbfs
- SinkMetadata metadata = {{{AudioSource(source), 1}}};
+ SinkMetadata sourceMetadata = {{{AudioSource(source), 1}}};
+#endif
Return<void> ret = mDevice->openInputStream(
handle,
hidlDevice,
hidlConfig,
- flags,
- metadata,
+ mkEnumConverter<AudioInputFlag>(flags),
+ sourceMetadata,
[&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
@@ -359,6 +369,13 @@
return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
}
+#if MAJOR_VERSION == 2
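+// The 2.0 HAL does not define getMicrophones(), so the query is reported as unsupported.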
+status_t DeviceHalHidl::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+ if (mDevice == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
if (mDevice == 0) return NO_INIT;
Result retval;
@@ -375,6 +392,7 @@
});
return processReturn("getMicrophones", ret, retval);
}
+#endif
status_t DeviceHalHidl::dump(int fd) {
if (mDevice == 0) return NO_INIT;
@@ -385,5 +403,5 @@
return processReturn("dump", ret);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
similarity index 94%
rename from media/libaudiohal/2.0/DeviceHalHidl.h
rename to media/libaudiohal/impl/DeviceHalHidl.h
index 3c1cb59..fb5e7e7 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -18,16 +18,19 @@
#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <android/hardware/audio/4.0/IPrimaryDevice.h>
#include <media/audiohal/DeviceHalInterface.h>
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
using ::android::hardware::Return;
namespace android {
+namespace CPP_VERSION {
class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
{
@@ -124,6 +127,7 @@
virtual ~DeviceHalHidl();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
similarity index 96%
rename from media/libaudiohal/4.0/DeviceHalLocal.cpp
rename to media/libaudiohal/impl/DeviceHalLocal.cpp
index a245dd9..14e26f5 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -23,7 +23,7 @@
#include "StreamHalLocal.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
: mDev(dev) {
@@ -185,6 +185,12 @@
return INVALID_OPERATION;
}
+#if MAJOR_VERSION == 2
+status_t DeviceHalLocal::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
if (mDev->get_microphones == NULL) return INVALID_OPERATION;
size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -196,6 +202,7 @@
}
return status;
}
+#endif
status_t DeviceHalLocal::dump(int fd) {
return mDev->dump(mDev, fd);
@@ -209,5 +216,5 @@
mDev->close_input_stream(mDev, stream_in);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
similarity index 98%
rename from media/libaudiohal/2.0/DeviceHalLocal.h
rename to media/libaudiohal/impl/DeviceHalLocal.h
index aec201a..18bd879 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -21,6 +21,7 @@
#include <media/audiohal/DeviceHalInterface.h>
namespace android {
+namespace CPP_VERSION {
class DeviceHalLocal : public DeviceHalInterface
{
@@ -122,6 +123,7 @@
uint32_t version() const { return mDev->common.version; }
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..28001da
--- /dev/null
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <vector>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/4.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::CPP_VERSION::IDevice;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::Return;
+
+namespace android {
+namespace CPP_VERSION {
+
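+// Gathers every available IDevicesFactory: the default instance plus, on 4.0 and later,
+// the optional MSD factory; a death handler is registered on each of them.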
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+ sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
+ if (!defaultFactory) {
+ ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
+ exit(1);
+ }
+ mDeviceFactories.push_back(defaultFactory);
+ if (MAJOR_VERSION >= 4) {
+ // The MSD factory is optional and only available starting at HAL 4.0
+ sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
+ if (msdFactory) {
+ mDeviceFactories.push_back(msdFactory);
+ }
+ }
+ for (const auto& factory : mDeviceFactories) {
+ // It is assumed that the DevicesFactoryHalInterface instance is owned
+ // by AudioFlinger and thus has the same lifespan.
+ factory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ }
+}
+
+
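+// On the 2.0 HAL, devices are addressed by IDevicesFactory::Device enum values; from 4.0 on,
+// openDevice() takes the module name directly, so the name is passed through unchanged.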
+#if MAJOR_VERSION == 2
+static IDevicesFactory::Device idFromHal(const char *name, status_t* status) {
+ *status = OK;
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+ return IDevicesFactory::Device::PRIMARY;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+ return IDevicesFactory::Device::A2DP;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+ return IDevicesFactory::Device::USB;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+ return IDevicesFactory::Device::R_SUBMIX;
+ } else if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+ return IDevicesFactory::Device::STUB;
+ }
+ ALOGE("Invalid device name %s", name);
+ *status = BAD_VALUE;
+ return {};
+}
+#elif MAJOR_VERSION == 4
+static const char* idFromHal(const char *name, status_t* status) {
+ *status = OK;
+ return name;
+}
+#endif
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mDeviceFactories.empty()) return NO_INIT;
+ status_t status;
+ auto hidlId = idFromHal(name, &status);
+ if (status != OK) return status;
+ Result retval = Result::NOT_INITIALIZED;
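+ // Ask each factory in turn; OK and NOT_INITIALIZED end the search, any other
+ // result moves on to the next factory.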
+ for (const auto& factory : mDeviceFactories) {
+ Return<void> ret = factory->openDevice(
+ hidlId,
+ [&](Result r, const sp<IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ if (!ret.isOk()) return FAILED_TRANSACTION;
+ switch (retval) {
+ // Device was found and was initialized successfully.
+ case Result::OK: return OK;
+ // Device was found but failed to initialize.
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ // Otherwise continue iterating.
+ default: ;
+ }
+ }
+ ALOGW("The specified device name is not recognized: \"%s\"", name);
+ return BAD_VALUE;
+}
+
+} // namespace CPP_VERSION
+} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/DevicesFactoryHalHidl.h
rename to media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 0748849..a4282b0 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -18,15 +18,17 @@
#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <android/hardware/audio/4.0/IDevicesFactory.h>
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include "DeviceHalHidl.h"
-using ::android::hardware::audio::V2_0::IDevicesFactory;
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
{
@@ -38,17 +40,15 @@
private:
friend class DevicesFactoryHalHybrid;
- sp<IDevicesFactory> mDevicesFactory;
- sp<IDevicesFactory> mDevicesFactoryMsd;
-
- static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+ std::vector<sp<IDevicesFactory>> mDeviceFactories;
// Can not be constructed directly by clients.
DevicesFactoryHalHidl();
- virtual ~DevicesFactoryHalHidl();
+ virtual ~DevicesFactoryHalHidl() = default;
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
similarity index 95%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index 1c4be74..f337a8b 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -22,15 +22,13 @@
#include "DevicesFactoryHalHidl.h"
namespace android {
+namespace CPP_VERSION {
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
: mLocalFactory(new DevicesFactoryHalLocal()),
mHidlFactory(new DevicesFactoryHalHidl()) {
}
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
@@ -39,4 +37,5 @@
return mLocalFactory->openDevice(name, device);
}
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
similarity index 89%
rename from media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
rename to media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index abd57d6..5ac0d0d 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -22,26 +22,27 @@
#include <utils/RefBase.h>
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
{
public:
+ DevicesFactoryHalHybrid();
+
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
private:
- friend class DevicesFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHybrid();
-
- virtual ~DevicesFactoryHalHybrid();
-
sp<DevicesFactoryHalInterface> mLocalFactory;
sp<DevicesFactoryHalInterface> mHidlFactory;
};
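+// Entry point used by DevicesFactoryHalInterface::create() to obtain the
+// version-specific factory implementation.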
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
+ return new DevicesFactoryHalHybrid();
+}
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
index e54edd4..af67ff5 100644
--- a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
@@ -26,7 +26,7 @@
#include "DevicesFactoryHalLocal.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
@@ -67,5 +67,5 @@
return rc;
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
similarity index 96%
rename from media/libaudiohal/2.0/DevicesFactoryHalLocal.h
rename to media/libaudiohal/impl/DevicesFactoryHalLocal.h
index b9d18ab..5d108dd 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -24,6 +24,7 @@
#include "DeviceHalLocal.h"
namespace android {
+namespace CPP_VERSION {
class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
{
@@ -41,6 +42,7 @@
virtual ~DevicesFactoryHalLocal() {}
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
similarity index 98%
rename from media/libaudiohal/4.0/EffectBufferHalHidl.cpp
rename to media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 957c89f..6ef4e8a 100644
--- a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -30,7 +30,7 @@
using ::android::hidl::allocator::V1_0::IAllocator;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
// static
uint64_t EffectBufferHalHidl::makeUniqueId() {
@@ -142,5 +142,5 @@
memcpy(mExternalData, mAudioBuffer.raw, size);
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/EffectBufferHalHidl.h
rename to media/libaudiohal/impl/EffectBufferHalHidl.h
index 31e0087..029d71a 100644
--- a/media/libaudiohal/2.0/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -18,16 +18,18 @@
#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidl/HidlSupport.h>
#include <media/audiohal/EffectBufferHalInterface.h>
#include <system/audio_effect.h>
-using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
using android::hardware::hidl_memory;
using android::hidl::memory::V1_0::IMemory;
namespace android {
+namespace CPP_VERSION {
class EffectBufferHalHidl : public EffectBufferHalInterface
{
@@ -71,6 +73,7 @@
status_t init();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
similarity index 95%
rename from media/libaudiohal/4.0/EffectHalHidl.cpp
rename to media/libaudiohal/impl/EffectHalHidl.cpp
index c99c4c8..12649a1 100644
--- a/media/libaudiohal/4.0/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -22,26 +22,25 @@
#include <media/EffectsFactoryApi.h>
#include <utils/Log.h>
-#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::effect::V4_0::AudioBuffer;
-using ::android::hardware::audio::effect::V4_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V4_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V4_0::Result;
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::effect::CPP_VERSION::AudioBuffer;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferAccess;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
+using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
using ::android::hardware::audio::common::utils::mkEnumConverter;
using ::android::hardware::hidl_vec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
@@ -338,5 +337,5 @@
return result;
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
similarity index 85%
rename from media/libaudiohal/2.0/EffectHalHidl.h
rename to media/libaudiohal/impl/EffectHalHidl.h
index 6ffdaf1..04f40d3 100644
--- a/media/libaudiohal/2.0/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -18,19 +18,22 @@
#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/IEffect.h>
+#include <android/hardware/audio/effect/4.0/IEffect.h>
#include <media/audiohal/EffectHalInterface.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <system/audio_effect.h>
-using ::android::hardware::audio::effect::V2_0::EffectBufferConfig;
-using ::android::hardware::audio::effect::V2_0::EffectConfig;
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectBufferConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectConfig;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using EffectResult = ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
namespace android {
+namespace CPP_VERSION {
class EffectHalHidl : public EffectHalInterface
{
@@ -68,8 +71,7 @@
private:
friend class EffectsFactoryHalHidl;
- typedef MessageQueue<
- hardware::audio::effect::V2_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+ typedef MessageQueue<EffectResult, hardware::kSynchronizedReadWrite> StatusMQ;
sp<IEffect> mEffect;
const uint64_t mEffectId;
@@ -79,7 +81,7 @@
std::unique_ptr<StatusMQ> mStatusMQ;
EventFlag* mEfGroup;
- static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+ static status_t analyzeResult(const EffectResult& result);
static void effectBufferConfigFromHal(
const buffer_config_t& halConfig, EffectBufferConfig* config);
static void effectBufferConfigToHal(
@@ -103,6 +105,7 @@
status_t setProcessBuffers();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
similarity index 93%
rename from media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index dfed784..b880433 100644
--- a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -18,21 +18,21 @@
//#define LOG_NDEBUG 0
#include <cutils/native_handle.h>
-#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
+#include "EffectsFactoryHalHidl.h"
#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::common::V4_0::HidlUtils;
-using ::android::hardware::audio::common::V4_0::Uuid;
-using ::android::hardware::audio::effect::V4_0::IEffect;
-using ::android::hardware::audio::effect::V4_0::Result;
+using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::Uuid;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
+using ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::Return;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
mEffectsFactory = IEffectsFactory::getService();
@@ -42,9 +42,6 @@
}
}
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
status_t EffectsFactoryHalHidl::queryAllDescriptors() {
if (mEffectsFactory == 0) return NO_INIT;
Result retval = Result::NOT_INITIALIZED;
@@ -148,5 +145,5 @@
}
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
similarity index 84%
rename from media/libaudiohal/2.0/EffectsFactoryHalHidl.h
rename to media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 82b5481..c6fced7 100644
--- a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,20 +18,25 @@
#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hardware/audio/effect/4.0/types.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include "ConversionHelperHidl.h"
namespace android {
+namespace CPP_VERSION {
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::audio::effect::CPP_VERSION::EffectDescriptor;
+using ::android::hardware::audio::effect::CPP_VERSION::IEffectsFactory;
using ::android::hardware::hidl_vec;
class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
{
public:
+ EffectsFactoryHalHidl();
+
// Returns the number of different effects in all loaded libraries.
virtual status_t queryNumberEffects(uint32_t *pNumEffects);
@@ -56,18 +61,17 @@
sp<EffectBufferHalInterface>* buffer) override;
private:
- friend class EffectsFactoryHalInterface;
-
sp<IEffectsFactory> mEffectsFactory;
hidl_vec<EffectDescriptor> mLastDescriptors;
- // Can not be constructed directly by clients.
- EffectsFactoryHalHidl();
- virtual ~EffectsFactoryHalHidl();
-
status_t queryAllDescriptors();
};
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
+ return new EffectsFactoryHalHidl();
+}
+
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
similarity index 93%
rename from media/libaudiohal/4.0/StreamHalHidl.cpp
rename to media/libaudiohal/impl/StreamHalHidl.cpp
index 1c2fdb0..b23e018 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "StreamHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
#include <android/hardware/audio/4.0/IStreamOutCallback.h>
#include <hwbinder/IPCThreadState.h>
#include <mediautils/SchedulingPolicyService.h>
@@ -27,30 +28,33 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::V4_0::AudioChannelMask;
-using ::android::hardware::audio::common::V4_0::AudioContentType;
-using ::android::hardware::audio::common::V4_0::AudioFormat;
-using ::android::hardware::audio::common::V4_0::AudioSource;
-using ::android::hardware::audio::common::V4_0::AudioUsage;
-using ::android::hardware::audio::common::V4_0::ThreadInfo;
-using ::android::hardware::audio::V4_0::AudioDrain;
-using ::android::hardware::audio::V4_0::IStreamOutCallback;
-using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V4_0::MicrophoneInfo;
-using ::android::hardware::audio::V4_0::MmapBufferInfo;
-using ::android::hardware::audio::V4_0::MmapPosition;
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
-using ::android::hardware::audio::V4_0::RecordTrackMetadata;
-using ::android::hardware::audio::V4_0::Result;
-using ::android::hardware::audio::V4_0::TimeSpec;
+using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
+using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
+using ::android::hardware::audio::common::CPP_VERSION::ThreadInfo;
+using ::android::hardware::audio::CPP_VERSION::AudioDrain;
+using ::android::hardware::audio::CPP_VERSION::IStreamOutCallback;
+using ::android::hardware::audio::CPP_VERSION::MessageQueueFlagBits;
+using ::android::hardware::audio::CPP_VERSION::MmapBufferInfo;
+using ::android::hardware::audio::CPP_VERSION::MmapPosition;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CPP_VERSION::TimeSpec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V4_0::IStreamIn::ReadCommand;
+using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+
+#if MAJOR_VERSION == 4
+using ::android::hardware::audio::common::CPP_VERSION::AudioContentType;
+using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
+using ::android::hardware::audio::common::CPP_VERSION::AudioUsage;
+using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
+using ::android::hardware::audio::CPP_VERSION::PlaybackTrackMetadata;
+using ::android::hardware::audio::CPP_VERSION::RecordTrackMetadata;
+#endif
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
StreamHalHidl::StreamHalHidl(IStream *stream)
: ConversionHelperHidl("Stream"),
@@ -566,6 +570,12 @@
}
}
+#if MAJOR_VERSION == 2
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+ // Audio HAL V2.0 does not support propagating source metadata
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
/** Transform a standard collection to an HIDL vector. */
template <class Values, class ElementConverter>
static auto transformToHidlVec(const Values& values, ElementConverter converter) {
@@ -576,7 +586,7 @@
}
status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
- hardware::audio::V4_0::SourceMetadata halMetadata = {
+ hardware::audio::CPP_VERSION::SourceMetadata halMetadata = {
.tracks = transformToHidlVec(sourceMetadata.tracks,
[](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
return {
@@ -587,6 +597,7 @@
})};
return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
}
+#endif
void StreamOutHalHidl::onWriteReady() {
sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
@@ -782,7 +793,19 @@
}
}
+#if MAJOR_VERSION == 2
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ if (mStream == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+ // Audio HAL V2.0 does not support propagating sink metadata
+ return INVALID_OPERATION;
+}
+
+#elif MAJOR_VERSION == 4
status_t StreamInHalHidl::getActiveMicrophones(
std::vector<media::MicrophoneInfo> *microphonesInfo) {
if (!mStream) return NO_INIT;
@@ -802,7 +825,7 @@
}
status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
- hardware::audio::V4_0::SinkMetadata halMetadata = {
+ hardware::audio::CPP_VERSION::SinkMetadata halMetadata = {
.tracks = transformToHidlVec(sinkMetadata.tracks,
[](const record_track_metadata& metadata) -> RecordTrackMetadata {
return {
@@ -812,6 +835,7 @@
})};
return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
similarity index 92%
rename from media/libaudiohal/2.0/StreamHalHidl.h
rename to media/libaudiohal/impl/StreamHalHidl.h
index ebad8ae..95ec7f1 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -20,8 +20,11 @@
#include <atomic>
#include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/4.0/IStream.h>
#include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/4.0/IStreamIn.h>
#include <android/hardware/audio/2.0/IStreamOut.h>
+#include <android/hardware/audio/4.0/IStreamOut.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <media/audiohal/StreamHalInterface.h>
@@ -29,18 +32,19 @@
#include "ConversionHelperHidl.h"
#include "StreamPowerLog.h"
-using ::android::hardware::audio::V2_0::IStream;
-using ::android::hardware::audio::V2_0::IStreamIn;
-using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::audio::CPP_VERSION::IStream;
+using ::android::hardware::audio::CPP_VERSION::IStreamIn;
+using ::android::hardware::audio::CPP_VERSION::IStreamOut;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+using ReadParameters = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteStatus;
namespace android {
+namespace CPP_VERSION {
class DeviceHalHidl;
@@ -243,6 +247,7 @@
status_t prepareForReading(size_t bufferSize);
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
similarity index 97%
rename from media/libaudiohal/4.0/StreamHalLocal.cpp
rename to media/libaudiohal/impl/StreamHalLocal.cpp
index e9d96bf..b134f57 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -25,7 +25,7 @@
#include "VersionUtils.h"
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
: mDevice(device),
@@ -341,6 +341,12 @@
return mStream->get_mmap_position(mStream, position);
}
+#if MAJOR_VERSION == 2
+status_t StreamInHalLocal::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+#elif MAJOR_VERSION == 4
status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
@@ -352,6 +358,7 @@
}
return status;
}
+#endif
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
similarity index 99%
rename from media/libaudiohal/2.0/StreamHalLocal.h
rename to media/libaudiohal/impl/StreamHalLocal.h
index cda8d0c..cea4229 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -21,6 +21,7 @@
#include "StreamPowerLog.h"
namespace android {
+namespace CPP_VERSION {
class DeviceHalLocal;
@@ -214,6 +215,7 @@
virtual ~StreamInHalLocal();
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/2.0/StreamPowerLog.h b/media/libaudiohal/impl/StreamPowerLog.h
similarity index 98%
rename from media/libaudiohal/2.0/StreamPowerLog.h
rename to media/libaudiohal/impl/StreamPowerLog.h
index a78b1aa..5fd3912 100644
--- a/media/libaudiohal/2.0/StreamPowerLog.h
+++ b/media/libaudiohal/impl/StreamPowerLog.h
@@ -23,6 +23,7 @@
#include <system/audio.h>
namespace android {
+namespace CPP_VERSION {
class StreamPowerLog {
public:
@@ -97,6 +98,7 @@
size_t mFrameSize;
};
+} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_H
diff --git a/media/libaudiohal/impl/VersionMacro.h b/media/libaudiohal/impl/VersionMacro.h
new file mode 100644
index 0000000..98e9c07
--- /dev/null
+++ b/media/libaudiohal/impl/VersionMacro.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_VERSION_MACRO_H
+#define ANDROID_HARDWARE_VERSION_MACRO_H
+
+#if !defined(MAJOR_VERSION) || !defined(MINOR_VERSION)
+#error "MAJOR_VERSION and MINOR_VERSION must be defined"
+#endif
+
+#define CONCAT_3(a,b,c) a##b##c
+#define EXPAND_CONCAT_3(a,b,c) CONCAT_3(a,b,c)
+/** The directory name of the version: <major>.<minor> */
+#define FILE_VERSION EXPAND_CONCAT_3(MAJOR_VERSION,.,MINOR_VERSION)
+
+#define CONCAT_4(a,b,c,d) a##b##c##d
+#define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
+/** The C++ namespace of the version: V<major>_<minor> */
+#define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)
+
+#endif // ANDROID_HARDWARE_VERSION_MACRO_H
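The two-stage EXPAND_CONCAT_* indirection above forces MAJOR_VERSION and MINOR_VERSION to be expanded before token pasting; pasting them directly would glue the literal macro names together. A standalone sketch of the expansion follows (the file name and the stringify helpers are illustrative only, not part of the patch):

    // Compile with, e.g.: g++ -std=c++11 -DMAJOR_VERSION=4 -DMINOR_VERSION=0 version_macro_demo.cpp
    #include <iostream>

    #define CONCAT_4(a,b,c,d) a##b##c##d
    #define EXPAND_CONCAT_4(a,b,c,d) CONCAT_4(a,b,c,d)
    // Expands to V4_0 when MAJOR_VERSION=4 and MINOR_VERSION=0.
    #define CPP_VERSION EXPAND_CONCAT_4(V,MAJOR_VERSION,_,MINOR_VERSION)

    #define STRINGIFY(x) #x
    #define EXPAND_STRINGIFY(x) STRINGIFY(x)

    int main() {
        // Prints "V4_0": exactly the namespace name the HAL wrappers open
        // via `namespace CPP_VERSION { ... }`.
        std::cout << EXPAND_STRINGIFY(CPP_VERSION) << std::endl;
        return 0;
    }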
diff --git a/media/libaudiohal/4.0/VersionUtils.h b/media/libaudiohal/impl/VersionUtils.h
similarity index 61%
rename from media/libaudiohal/4.0/VersionUtils.h
rename to media/libaudiohal/impl/VersionUtils.h
index 1246c2e..5004895 100644
--- a/media/libaudiohal/4.0/VersionUtils.h
+++ b/media/libaudiohal/impl/VersionUtils.h
@@ -14,22 +14,36 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_VERSION_UTILS_4_0_H
-#define ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#ifndef ANDROID_HARDWARE_VERSION_UTILS_H
+#define ANDROID_HARDWARE_VERSION_UTILS_H
+#include <android/hardware/audio/2.0/types.h>
#include <android/hardware/audio/4.0/types.h>
#include <hidl/HidlSupport.h>
-using ::android::hardware::audio::V4_0::ParameterValue;
-using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CPP_VERSION::Result;
using ::android::hardware::Return;
using ::android::hardware::hidl_vec;
using ::android::hardware::hidl_string;
namespace android {
-namespace V4_0 {
+namespace CPP_VERSION {
namespace utils {
+#if MAJOR_VERSION == 2
+template <class T, class Callback>
+Return<void> getParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+ hidl_vec<hidl_string> keys, Callback callback) {
+ return object->getParameters(keys, callback);
+}
+
+template <class T>
+Return<Result> setParameters(T& object, hidl_vec<ParameterValue> /*context*/,
+ hidl_vec<ParameterValue> keys) {
+ return object->setParameters(keys);
+}
+#elif MAJOR_VERSION == 4
template <class T, class Callback>
Return<void> getParameters(T& object, hidl_vec<ParameterValue> context,
hidl_vec<hidl_string> keys, Callback callback) {
@@ -41,9 +55,10 @@
hidl_vec<ParameterValue> keys) {
return object->setParameters(context, keys);
}
+#endif
} // namespace utils
-} // namespace V4_0
+} // namespace CPP_VERSION
} // namespace android
-#endif // ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#endif // ANDROID_HARDWARE_VERSION_UTILS_H
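The getParameters/setParameters overloads above hide the signature difference between the 2.0 and 4.0 stream interfaces (4.0 added a context vector), so version-agnostic callers always pass both arguments. The following self-contained sketch reproduces that adaptation pattern with dummy types; it is an illustration only and does not use the real HIDL interfaces:

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-ins for the two HAL stream interfaces (illustrative only).
    struct StreamV2 {
        int setParameters(const std::vector<std::string>& keys) {
            return static_cast<int>(keys.size());
        }
    };
    struct StreamV4 {
        int setParameters(const std::vector<std::string>& context,
                          const std::vector<std::string>& keys) {
            return static_cast<int>(context.size() + keys.size());
        }
    };

    #ifndef MAJOR_VERSION
    #define MAJOR_VERSION 4   // assumption for this sketch only
    #endif

    // One wrapper signature; the body selected by MAJOR_VERSION decides
    // whether the extra context argument is forwarded or dropped.
    #if MAJOR_VERSION == 2
    template <class T>
    int setParameters(T& object, const std::vector<std::string>& /*context*/,
                      const std::vector<std::string>& keys) {
        return object.setParameters(keys);
    }
    #elif MAJOR_VERSION == 4
    template <class T>
    int setParameters(T& object, const std::vector<std::string>& context,
                      const std::vector<std::string>& keys) {
        return object.setParameters(context, keys);
    }
    #endif

    int main() {
        StreamV4 stream;
        std::cout << setParameters(stream, {"ctx"}, {"routing", "format"}) << "\n";
        return 0;
    }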
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
new file mode 100644
index 0000000..fa0effc
--- /dev/null
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+
+/** @file Library entry points to create the HAL factories. */
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+namespace V2_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V2_0
+
+namespace V4_0 {
+sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
+sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
+} // namespace V4_0
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 1b3a1be..a22819a 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -32,6 +32,9 @@
"libaudioclient_headers",
"libaudio_system_headers",
],
+ export_header_lib_headers: [
+ "libmedia_headers",
+ ],
clang: true,
}
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 034f7c2..1c95e27 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -36,6 +36,8 @@
const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
const char * const AudioParameter::keyPresentationId = AUDIO_PARAMETER_STREAM_PRESENTATION_ID;
const char * const AudioParameter::keyProgramId = AUDIO_PARAMETER_STREAM_PROGRAM_ID;
+const char * const AudioParameter::keyAudioLanguagePreferred =
+ AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED;
const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index a3db754..514c795 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -132,6 +132,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_AV_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_DIRECT),
TERMINATOR
};
diff --git a/media/libmedia/include/media/PatchBuilder.h b/media/libmedia/include/media/PatchBuilder.h
new file mode 100644
index 0000000..f2722a6
--- /dev/null
+++ b/media/libmedia/include/media/PatchBuilder.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PATCH_BUILDER_H
+#define ANDROID_PATCH_BUILDER_H
+
+#include <functional>
+#include <utility>
+
+#include <system/audio.h>
+#include <utils/StrongPointer.h>
+
+// This is a header-only utility.
+
+namespace android {
+
+class PatchBuilder {
+ public:
+ using mix_usecase_t = decltype(audio_port_config_mix_ext::usecase);
+
+ PatchBuilder() = default;
+
+ // All existing methods operating on audio patches take a pointer to const.
+ // It's OK to construct a temporary PatchBuilder while preparing a parameter
+ // to such a function because the Builder will be kept alive until the code
+ // execution reaches the function call statement semicolon.
+ const struct audio_patch* patch() const { return &mPatch; }
+
+ template<typename T, typename... S>
+ PatchBuilder& addSink(T&& t, S&&... s) {
+ sinks().add(std::forward<T>(t), std::forward<S>(s)...);
+ return *this;
+ }
+ // Explicit type of the second parameter allows clients to provide the struct inline.
+ template<typename T>
+ PatchBuilder& addSink(T&& t, const mix_usecase_t& update) {
+ sinks().add(std::forward<T>(t), update);
+ return *this;
+ }
+ template<typename T, typename... S>
+ PatchBuilder& addSource(T&& t, S&&... s) {
+ sources().add(std::forward<T>(t), std::forward<S>(s)...);
+ return *this;
+ }
+ // Explicit type of the second parameter allows clients to provide the struct inline.
+ template<typename T>
+ PatchBuilder& addSource(T&& t, const mix_usecase_t& update) {
+ sources().add(std::forward<T>(t), update);
+ return *this;
+ }
+
+ private:
+ struct PortCfgs {
+ PortCfgs(unsigned int *countPtr, struct audio_port_config *portCfgs)
+ : mCountPtr(countPtr), mPortCfgs(portCfgs) {}
+ audio_port_config& add(const audio_port_config& portCfg) {
+ return *advance() = portCfg;
+ }
+ template<typename T>
+ audio_port_config& add(const sp<T>& entity) {
+ audio_port_config* added = advance();
+ entity->toAudioPortConfig(added);
+ return *added;
+ }
+ template<typename T>
+ void add(const sp<T>& entity, const mix_usecase_t& usecaseUpdate) {
+ add(entity).ext.mix.usecase = usecaseUpdate;
+ }
+ template<typename T>
+ void add(const sp<T>& entity,
+ std::function<mix_usecase_t(const mix_usecase_t&)> usecaseUpdater) {
+ mix_usecase_t* usecase = &add(entity).ext.mix.usecase;
+ *usecase = usecaseUpdater(*usecase);
+ }
+ struct audio_port_config* advance() {
+ return &mPortCfgs[(*mCountPtr)++];
+ }
+ unsigned int *mCountPtr;
+ struct audio_port_config *mPortCfgs;
+ };
+
+ PortCfgs sinks() { return PortCfgs(&mPatch.num_sinks, mPatch.sinks); }
+ PortCfgs sources() { return PortCfgs(&mPatch.num_sources, mPatch.sources); }
+
+ struct audio_patch mPatch = {};
+};
+
+} // namespace android
+
+#endif // ANDROID_PATCH_BUILDER_H
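PatchBuilder is header-only and intended to be used as a temporary while building the argument of any function that takes `const struct audio_patch *`. A usage sketch under that assumption follows; it requires the Android headers, and applyPatch() is a hypothetical consumer used only for illustration:

    #include <media/PatchBuilder.h>
    #include <system/audio.h>
    #include <utils/Errors.h>

    // Hypothetical consumer standing in for any API taking a const audio_patch*.
    android::status_t applyPatch(const struct audio_patch* patch);

    android::status_t patchSourceToSink(const audio_port_config& sourcePort,
                                        const audio_port_config& sinkPort) {
        // The temporary builder stays alive until the end of the full
        // expression (see the comment in the header), so passing patch()
        // inline to the callee is safe.
        return applyPatch(android::PatchBuilder().addSource(sourcePort)
                                                 .addSink(sinkPort)
                                                 .patch());
    }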
diff --git a/media/libmedia/include/media/SingleStateQueue.h b/media/libmedia/include/media/SingleStateQueue.h
index d423962..c2761cb 100644
--- a/media/libmedia/include/media/SingleStateQueue.h
+++ b/media/libmedia/include/media/SingleStateQueue.h
@@ -99,6 +99,13 @@
return mShared->mAck - sequence >= 0;
}
+ // returns the last value written (or the contents of the shared buffer after initialization
+ // if no value was written).
+ T last() const
+ { // assume no sequence check required - we are the writer.
+ return mShared->mValue;
+ }
+
private:
int32_t mSequence;
Shared * const mShared;
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index 39f8d6e..d197b3f 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -39,7 +39,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(false),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
}
@@ -51,7 +51,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(true),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
if (size < kSharedMemThreshold
|| std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
@@ -84,7 +84,7 @@
mRangeLength(mSize),
mBuffer(buffer),
mOwnsData(false),
- mMetaData(new MetaData),
+ mMetaData(new MetaDataBase),
mOriginal(NULL) {
}
@@ -96,7 +96,7 @@
return;
}
- int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
+ int prevCount = mRefCount.fetch_sub(1);
if (prevCount == 1) {
if (mObserver == NULL) {
delete this;
@@ -110,13 +110,13 @@
void MediaBuffer::claim() {
CHECK(mObserver != NULL);
- CHECK_EQ(mRefCount, 1);
+ CHECK_EQ(mRefCount.load(std::memory_order_relaxed), 1);
- mRefCount = 0;
+ mRefCount.store(0, std::memory_order_relaxed);
}
void MediaBuffer::add_ref() {
- (void) __sync_fetch_and_add(&mRefCount, 1);
+ (void) mRefCount.fetch_add(1);
}
void *MediaBuffer::data() const {
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index f944d51..5a25965 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -86,12 +86,14 @@
virtual MediaBufferBase *clone();
// sum of localRefcount() and remoteRefcount()
+ // Result should be treated as approximate unless the result precludes concurrent accesses.
virtual int refcount() const {
return localRefcount() + remoteRefcount();
}
+ // Result should be treated as approximate unless the result precludes concurrent accesses.
virtual int localRefcount() const {
- return mRefCount;
+ return mRefCount.load(std::memory_order_relaxed);
}
virtual int remoteRefcount() const {
@@ -146,7 +148,7 @@
void claim();
MediaBufferObserver *mObserver;
- int mRefCount;
+ std::atomic<int> mRefCount;
void *mData;
size_t mSize, mRangeOffset, mRangeLength;
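The reference count now uses std::atomic<int> instead of the GCC __sync builtins, and the relaxed loads in the accessors are documented as approximate. A standalone sketch of the same release logic on a dummy class shows why the value returned by fetch_sub (the pre-decrement count) drives the final delete:

    // Standalone sketch (dummy class, not MediaBuffer itself) of the
    // std::atomic refcount pattern the patch switches to.
    #include <atomic>
    #include <cassert>

    class RefCounted {
    public:
        void add_ref() { (void) mRefCount.fetch_add(1); }
        void release() {
            // fetch_sub returns the value *before* the decrement, mirroring
            // __sync_fetch_and_sub; the last holder sees 1 and deletes.
            int prevCount = mRefCount.fetch_sub(1);
            if (prevCount == 1) {
                delete this;
            }
        }
        int localRefcount() const {
            // Relaxed load: the result is only approximate under concurrency,
            // as the new comment in MediaBuffer.h points out.
            return mRefCount.load(std::memory_order_relaxed);
        }
    private:
        ~RefCounted() = default;
        std::atomic<int> mRefCount{1};
    };

    int main() {
        RefCounted* buf = new RefCounted();
        buf->add_ref();
        assert(buf->localRefcount() == 2);
        buf->release();       // back to 1
        buf->release();       // last reference: object deletes itself
        return 0;
    }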
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
srcs: [
"JAudioTrack.cpp",
+ "JavaVMHelper.cpp",
"MediaPlayer2AudioOutput.cpp",
"mediaplayer2.cpp",
],
@@ -49,6 +50,10 @@
"media_plugin_headers",
],
+ include_dirs: [
+ "frameworks/base/core/jni",
+ ],
+
static_libs: [
"libmedia_helper",
"libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
#include "mediaplayer2/JAudioTrack.h"
#include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
namespace android {
@@ -39,7 +39,7 @@
const audio_attributes_t* pAttributes, // AudioAttributes
float maxRequiredSpeed) { // bufferSizeInBytes
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
@@ -116,19 +116,19 @@
}
JAudioTrack::~JAudioTrack() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
env->DeleteGlobalRef(mAudioTrackCls);
}
size_t JAudioTrack::frameCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
}
size_t JAudioTrack::channelCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
}
@@ -143,7 +143,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
mAudioTrackCls, "getPlaybackHeadPosition", "()I");
*position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
}
bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
// TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
// Should we do the same thing?
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
}
const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackParams = env->GetMethodID(
mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
return media::VolumeShaper::Status(BAD_VALUE);
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
"(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
}
status_t JAudioTrack::setAuxEffectSendLevel(float level) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
}
status_t JAudioTrack::attachAuxEffect(int effectId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
return javaToNativeStatus(result);
}
status_t JAudioTrack::setVolume(float left, float right) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// TODO: Java setStereoVolume is deprecated. Do we really need this method?
jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
}
status_t JAudioTrack::setVolume(float volume) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
return javaToNativeStatus(result);
}
status_t JAudioTrack::start() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
// TODO: Should we catch the Java IllegalStateException from play()?
env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jbyteArray jAudioData = env->NewByteArray(size);
env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
@@ -353,7 +353,7 @@
}
void JAudioTrack::stop() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
env->CallVoidMethod(mAudioTrackObj, jStop);
// TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
}
void JAudioTrack::flush() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
env->CallVoidMethod(mAudioTrackObj, jFlush);
}
void JAudioTrack::pause() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
env->CallVoidMethod(mAudioTrackObj, jPause);
// TODO: Should we catch IllegalStateException?
}
bool JAudioTrack::isPlaying() const {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
@@ -393,7 +393,7 @@
}
uint32_t JAudioTrack::getSampleRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
}
@@ -403,7 +403,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
}
audio_format_t JAudioTrack::format() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
}
audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
"()Landroid/media/AudioDeviceInfo;");
jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
}
audio_session_t JAudioTrack::getAudioSessionId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
return (audio_session_t) sessionId;
}
status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
return NULL;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// Referenced "android_media_VolumeShaper.h".
jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
jobject JAudioTrack::createVolumeShaperOperationObj(
const sp<media::VolumeShaper::Operation>& operation) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
}
jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
}
jobject JAudioTrack::createCallbackExecutor() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
"newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+ JNIEnv *env;
+ JavaVM *vm = sJavaVM.load();
+ CHECK(vm != NULL);
+
+ if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+ return NULL;
+ }
+
+ return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+ sJavaVM.store(vm);
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+ static JNIEnv *getJNIEnv();
+ static void setJavaVM(JavaVM *vm);
+
+private:
+ // Once a valid JavaVM has been set, it should never be reset or changed.
+ // However, as it may be accessed from multiple threads, access needs to be
+ // synchronized.
+ static std::atomic<JavaVM *> sJavaVM;
+};
+
+} // namespace android
+
+#endif // JAVA_VM_HELPER_H_
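JAudioTrack now obtains its JNIEnv from JavaVMHelper instead of AndroidRuntime, which keeps libmediaplayer2 free of that runtime dependency but means something must hand the JavaVM to the helper before the first call. A hedged sketch of that wiring, assuming the library is loaded by the Java runtime so JNI_OnLoad runs (the actual call site is not shown in this patch):

    #include <jni.h>
    #include <mediaplayer2/JavaVMHelper.h>

    // Standard JNI entry point invoked by System.loadLibrary(); the stored
    // JavaVM is what JavaVMHelper::getJNIEnv() later queries via GetEnv().
    jint JNI_OnLoad(JavaVM* vm, void* /* reserved */) {
        android::JavaVMHelper::setJavaVM(vm);
        return JNI_VERSION_1_4;   // matches the version getJNIEnv() requests
    }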
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 3905b55..2fb5a2c 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -150,11 +150,11 @@
// Do not change these values without updating their counterparts in MediaPlayer2.java
enum mediaplayer2_states {
- MEDIAPLAYER2_STATE_IDLE = 1,
- MEDIAPLAYER2_STATE_PREPARED = 2,
- MEDIAPLAYER2_STATE_PLAYING = 3,
- MEDIAPLAYER2_STATE_PAUSED = 4,
- MEDIAPLAYER2_STATE_ERROR = 5,
+ MEDIAPLAYER2_STATE_IDLE = 1001,
+ MEDIAPLAYER2_STATE_PREPARED = 1002,
+ MEDIAPLAYER2_STATE_PLAYING = 1003,
+ MEDIAPLAYER2_STATE_PAUSED = 1004,
+ MEDIAPLAYER2_STATE_ERROR = 1005,
};
enum media_player2_internal_states {
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index d586192..3af212e 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -68,7 +68,7 @@
status_t stop();
status_t pause();
bool isPlaying();
- mediaplayer2_states getMediaPlayer2State();
+ mediaplayer2_states getState();
status_t setPlaybackSettings(const AudioPlaybackRate& rate);
status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index e5567dc..4fb47b8 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -860,7 +860,7 @@
return false;
}
-mediaplayer2_states MediaPlayer2::getMediaPlayer2State() {
+mediaplayer2_states MediaPlayer2::getState() {
Mutex::Autolock _l(mLock);
if (mCurrentState & MEDIA_PLAYER2_STATE_ERROR) {
return MEDIAPLAYER2_STATE_ERROR;
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index f09e93d..22a30b9 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -254,7 +254,7 @@
// of PerformanceAnalysis
void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
int maxHeight) {
- if (mHists.empty()) {
+ if (mHists.empty() || body == nullptr) {
return;
}
@@ -273,10 +273,13 @@
}
}
- // underscores and spaces length corresponds to maximum width of histogram
- static const int kLen = 200;
- std::string underscores(kLen, '_');
- std::string spaces(kLen, ' ');
+ static const int SIZE = 128;
+ char title[SIZE];
+ snprintf(title, sizeof(title), "\n%s %3.2f %s\n%s%d, %lld, %lld\n",
+ "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:",
+ "Thread, hash, starting timestamp: ", author,
+ static_cast<long long>(hash), static_cast<long long>(startingTs));
+ static const char * const kLabel = "ms";
auto it = buckets.begin();
double maxDelta = it->first;
@@ -299,11 +302,7 @@
scalingFactor = (height + maxHeight) / maxHeight;
height /= scalingFactor;
}
- body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
- "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
- body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
- "Thread, hash, starting timestamp: ", author,
- static_cast<long long int>(hash), static_cast<long long int>(startingTs));
+ body->appendFormat("%s", title);
// write histogram label line with bucket values
body->appendFormat("\n%s", " ");
body->appendFormat("%*s", leftPadding, " ");
@@ -312,6 +311,11 @@
body->appendFormat("%*d", colWidth, x.second);
}
// write histogram ascii art
+ // underscores and spaces length corresponds to maximum width of histogram
+ static const int kLen = 200;
+ static const std::string underscores(kLen, '_');
+ static const std::string spaces(kLen, ' ');
+
body->appendFormat("\n%s", " ");
for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
const int value = 1 << row;
@@ -335,7 +339,7 @@
const int colWidth = numberWidth(x.first, leftPadding);
body->appendFormat("%*.*f", colWidth, 1, x.first);
}
- body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+ body->appendFormat("%.*s%s\n", bucketWidth, spaces.c_str(), kLabel);
// Now report glitches
body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 7f39d10..3526047 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5556,6 +5556,11 @@
break;
}
+ case kWhatCheckIfStuck: {
+ ALOGV("No-op by default");
+ break;
+ }
+
default:
return false;
}
@@ -7873,6 +7878,18 @@
break;
}
+ case kWhatCheckIfStuck:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mCodec->mStateGeneration) {
+ mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+ }
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
@@ -7884,6 +7901,11 @@
void ACodec::OutputPortSettingsChangedState::stateEntered() {
ALOGV("[%s] Now handling output port settings change",
mCodec->mComponentName.c_str());
+
+ // If we haven't transitioned after 3 seconds, we're probably stuck.
+ sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
}
bool ACodec::OutputPortSettingsChangedState::onOMXFrameRendered(
@@ -8146,6 +8168,11 @@
ALOGV("[%s] Now Flushing", mCodec->mComponentName.c_str());
mFlushComplete[kPortIndexInput] = mFlushComplete[kPortIndexOutput] = false;
+
+ // If we haven't transitioned after 3 seconds, we're probably stuck.
+ sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
}
bool ACodec::FlushingState::onMessageReceived(const sp<AMessage> &msg) {
@@ -8160,6 +8187,7 @@
msg->setInt32("generation", mCodec->mStateGeneration);
msg->post(3000000);
}
+ handled = true;
break;
}
@@ -8180,6 +8208,18 @@
break;
}
+ case kWhatCheckIfStuck:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mCodec->mStateGeneration) {
+ mCodec->signalError(OMX_ErrorUndefined, TIMED_OUT);
+ }
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
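Both kWhatCheckIfStuck handlers rely on the same guard: the delayed message is stamped with mStateGeneration when posted, and it only signals an error if that generation is still current when the message fires, meaning the codec never left the state. A standalone, single-threaded sketch of this generation-token watchdog:

    #include <functional>
    #include <iostream>

    struct Watchdog {
        int stateGeneration = 0;
        std::function<void()> pendingCheck;          // stands in for msg->post(3s)

        void armCheck() {
            int generation = stateGeneration;        // stamp with the current generation
            pendingCheck = [this, generation] {
                if (generation == stateGeneration) {
                    std::cout << "still in the same state after timeout -> signal error\n";
                } else {
                    std::cout << "state changed in time -> ignore\n";
                }
            };
        }
        void transition() { ++stateGeneration; }     // any transition invalidates old stamps
    };

    int main() {
        Watchdog w;
        w.armCheck();
        w.transition();       // the codec made progress before the timeout
        w.pendingCheck();     // prints "state changed in time -> ignore"

        w.armCheck();
        w.pendingCheck();     // prints "still in the same state after timeout -> signal error"
        return 0;
    }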
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 710ae68..266a240 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -129,6 +129,7 @@
secureHandle = static_cast<native_handle_t *>(secureData->getDestinationPointer());
}
ssize_t result = -1;
+ ssize_t codecDataOffset = 0;
if (mCrypto != NULL) {
ICrypto::DestinationBuffer destination;
if (secure) {
@@ -180,9 +181,16 @@
Status status = Status::OK;
hidl_string detailedError;
+ ScramblingControl sctrl = ScramblingControl::UNSCRAMBLED;
+
+ if (key != NULL) {
+ sctrl = (ScramblingControl)key[0];
+ // Adjust for the PES offset
+ codecDataOffset = key[2] | (key[3] << 8);
+ }
auto returnVoid = mDescrambler->descramble(
- key != NULL ? (ScramblingControl)key[0] : ScramblingControl::UNSCRAMBLED,
+ sctrl,
hidlSubSamples,
srcBuffer,
0,
@@ -202,6 +210,11 @@
return UNKNOWN_ERROR;
}
+ if (result < codecDataOffset) {
+ ALOGD("invalid codec data offset: %zd, result %zd", codecDataOffset, result);
+ return BAD_VALUE;
+ }
+
ALOGV("descramble succeeded, %zd bytes", result);
if (dstBuffer.type == BufferType::SHARED_MEMORY) {
@@ -210,7 +223,7 @@
}
}
- it->mCodecBuffer->setRange(0, result);
+ it->mCodecBuffer->setRange(codecDataOffset, result - codecDataOffset);
// Copy metadata from client to codec buffer.
it->mCodecBuffer->meta()->clear();
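The hunk above reads a little-endian 16-bit PES offset out of bytes 2 and 3 of the descrambler key blob, rejects results shorter than that offset, and then starts the codec buffer range past the header. A minimal standalone sketch of that arithmetic (the sample values are made up):

    #include <cstdint>
    #include <cstdio>
    #include <sys/types.h>

    int main() {
        // key[0] = scrambling control, key[2..3] = PES offset, little-endian.
        const uint8_t key[4] = { 1, 0, 0x0e, 0x00 };
        ssize_t result = 188;                              // bytes produced by descramble
        ssize_t codecDataOffset = key[2] | (key[3] << 8);  // 14

        if (result < codecDataOffset) {
            std::printf("invalid codec data offset: %zd, result %zd\n",
                        codecDataOffset, result);
            return 1;
        }
        // Equivalent of setRange(codecDataOffset, result - codecDataOffset):
        std::printf("payload starts at %zd, length %zd\n",
                    codecDataOffset, result - codecDataOffset);
        return 0;
    }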
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 6e94517..3370df1 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -501,9 +501,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
@@ -530,11 +531,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
frame->getFlattenedData(),
- frame->mWidth,
- frame->mHeight,
+ frame->mWidth, frame->mHeight, frame->mRowBytes,
crop_left, crop_top, crop_right, crop_bottom);
return OK;
}
@@ -681,9 +681,10 @@
return ERROR_MALFORMED;
}
- int32_t width, height;
+ int32_t width, height, stride;
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
+ CHECK(outputFormat->findInt32("stride", &stride));
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
@@ -727,11 +728,10 @@
if (converter.isValid()) {
converter.convert(
(const uint8_t *)videoFrameBuffer->data(),
- width, height,
+ width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
mFrame->getFlattenedData(),
- mFrame->mWidth,
- mFrame->mHeight,
+ mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
dstLeft, dstTop, dstRight, dstBottom);
return OK;
}
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index f6c61a0..2d4bd39 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -123,7 +123,7 @@
};
Mutex MediaExtractorFactory::gPluginMutex;
-std::shared_ptr<List<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
+std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
bool MediaExtractorFactory::gPluginsRegistered = false;
// static
@@ -133,7 +133,7 @@
*confidence = 0.0f;
*meta = nullptr;
- std::shared_ptr<List<sp<ExtractorPlugin>>> plugins;
+ std::shared_ptr<std::list<sp<ExtractorPlugin>>> plugins;
{
Mutex::Autolock autoLock(gPluginMutex);
if (!gPluginsRegistered) {
@@ -145,6 +145,7 @@
MediaExtractor::CreatorFunc curCreator = NULL;
MediaExtractor::CreatorFunc bestCreator = NULL;
for (auto it = plugins->begin(); it != plugins->end(); ++it) {
+ ALOGV("sniffing %s", (*it)->def.extractor_name);
float newConfidence;
void *newMeta = nullptr;
MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
@@ -171,7 +172,7 @@
// static
void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
- List<sp<ExtractorPlugin>> &pluginList) {
+ std::list<sp<ExtractorPlugin>> &pluginList) {
 // sanity check struct version, uuid, name
if (plugin->def.def_version == 0
|| plugin->def.def_version > MediaExtractor::EXTRACTORDEF_VERSION) {
@@ -213,7 +214,7 @@
//static
void MediaExtractorFactory::RegisterExtractorsInApk(
- const char *apkPath, List<sp<ExtractorPlugin>> &pluginList) {
+ const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", apkPath);
ZipArchiveHandle zipHandle;
int32_t ret = OpenArchive(apkPath, &zipHandle);
@@ -261,7 +262,7 @@
//static
void MediaExtractorFactory::RegisterExtractorsInSystem(
- const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList) {
+ const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", libDirPath);
DIR *libDir = opendir(libDirPath);
if (libDir) {
@@ -291,6 +292,10 @@
}
}
+static bool compareFunc(const sp<ExtractorPlugin>& first, const sp<ExtractorPlugin>& second) {
+ return strcmp(first->def.extractor_name, second->def.extractor_name) < 0;
+}
+
// static
void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
Mutex::Autolock autoLock(gPluginMutex);
@@ -301,7 +306,7 @@
return;
}
- std::shared_ptr<List<sp<ExtractorPlugin>>> newList(new List<sp<ExtractorPlugin>>());
+ std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
RegisterExtractorsInSystem("/system/lib"
#ifdef __LP64__
@@ -319,6 +324,7 @@
RegisterExtractorsInApk(newUpdateApkPath, *newList);
}
+ newList->sort(compareFunc);
gPlugins = newList;
gPluginsRegistered = true;
}
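Switching gPlugins from the Android List<> to std::list makes the standard sort() member available, and the new compareFunc gives the sniffing loop a deterministic order by extractor name. A standalone sketch of that sort with shared_ptr stand-ins for sp<ExtractorPlugin>:

    #include <cstring>
    #include <iostream>
    #include <list>
    #include <memory>
    #include <string>

    struct Plugin { std::string name; };
    using PluginPtr = std::shared_ptr<Plugin>;   // stand-in for sp<ExtractorPlugin>

    static bool compareFunc(const PluginPtr& first, const PluginPtr& second) {
        return std::strcmp(first->name.c_str(), second->name.c_str()) < 0;
    }

    int main() {
        std::list<PluginPtr> plugins = {
            std::make_shared<Plugin>(Plugin{"WAVExtractor"}),
            std::make_shared<Plugin>(Plugin{"MP3Extractor"}),
            std::make_shared<Plugin>(Plugin{"AACExtractor"}),
        };
        plugins.sort(compareFunc);               // same call as in UpdateExtractors()
        for (const auto& p : plugins) std::cout << p->name << "\n";
        return 0;
    }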
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index cf5e91e..ea778a4 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1577,6 +1577,7 @@
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
{ MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3},
+ { MEDIA_MIMETYPE_AUDIO_AC4, AUDIO_FORMAT_AC4},
{ MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC},
{ 0, AUDIO_FORMAT_INVALID }
};
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 813004b..942f850 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -431,7 +431,7 @@
}
if (mInputBufferCount == 0) {
- CHECK(mHeader == NULL);
+ delete mHeader;
mHeader = new OpusHeader();
memset(mHeader, 0, sizeof(*mHeader));
if (!ParseOpusHeader(data, size, mHeader)) {
@@ -452,6 +452,9 @@
}
int status = OPUS_INVALID_STATE;
+ if (mDecoder != NULL) {
+ opus_multistream_decoder_destroy(mDecoder);
+ }
mDecoder = opus_multistream_decoder_create(kRate,
mHeader->channels,
mHeader->num_streams,
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 05f4104..c46a40f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -85,9 +85,15 @@
|| mDstFormat == OMX_COLOR_Format32bitBGRA8888;
}
+/*
+ * If stride is non-zero, client's stride will be used. For planar
+ * or semi-planar YUV formats, stride must be an even number.
+ * If stride is zero, it will be calculated based on width and bpp
+ * of the format, assuming no padding on the right edge.
+ */
ColorConverter::BitmapParams::BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat)
@@ -101,6 +107,8 @@
mCropBottom(cropBottom) {
switch(mColorFormat) {
case OMX_COLOR_Format16bitRGB565:
+ case OMX_COLOR_FormatYUV420Planar16:
+ case OMX_COLOR_FormatCbYCrY:
mBpp = 2;
mStride = 2 * mWidth;
break;
@@ -112,13 +120,7 @@
mStride = 4 * mWidth;
break;
- case OMX_COLOR_FormatYUV420Planar16:
- mBpp = 2;
- mStride = 2 * mWidth;
- break;
-
case OMX_COLOR_FormatYUV420Planar:
- case OMX_COLOR_FormatCbYCrY:
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
@@ -132,6 +134,10 @@
mStride = mWidth;
break;
}
+ // use client's stride if it's specified.
+ if (stride != 0) {
+ mStride = stride;
+ }
}
size_t ColorConverter::BitmapParams::cropWidth() const {
@@ -144,21 +150,21 @@
status_t ColorConverter::convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom) {
BitmapParams src(
const_cast<void *>(srcBits),
- srcWidth, srcHeight,
+ srcWidth, srcHeight, srcStride,
srcCropLeft, srcCropTop, srcCropRight, srcCropBottom, mSrcFormat);
BitmapParams dst(
dstBits,
- dstWidth, dstHeight,
+ dstWidth, dstHeight, dstStride,
dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
if (!((src.mCropLeft & 1) == 0
@@ -792,15 +798,15 @@
uint8_t *kAdjustedClip = initClip();
- uint16_t *dst_ptr = (uint16_t *)dst.mBits
- + dst.mCropTop * dst.mWidth + dst.mCropLeft;
+ uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
+ dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
const uint8_t *src_y =
- (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
const uint8_t *src_u =
- (const uint8_t *)src_y + src.mWidth * src.mHeight
- + src.mCropTop * src.mWidth + src.mCropLeft;
+ (const uint8_t *)src.mBits + src.mHeight * src.mStride +
+ src.mCropTop * src.mStride / 2 + src.mCropLeft;
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -842,13 +848,13 @@
}
}
- src_y += src.mWidth;
+ src_y += src.mStride;
if (y & 1) {
- src_u += src.mWidth;
+ src_u += src.mStride;
}
- dst_ptr += dst.mWidth;
+ dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
}
return OK;
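With the new stride parameter, source and destination rows may be wider in bytes than width * bpp, so the converter advances row pointers by mStride rather than by mWidth. A standalone sketch of stride-aware plane copying that follows the same rule:

    #include <cstdint>
    #include <vector>

    static void copyPlane(const uint8_t* src, size_t srcStride,
                          uint8_t* dst, size_t dstStride,
                          size_t width, size_t height) {
        for (size_t y = 0; y < height; ++y) {
            for (size_t x = 0; x < width; ++x) {
                dst[x] = src[x];
            }
            src += srcStride;   // not `width`: skip any right-edge padding
            dst += dstStride;
        }
    }

    int main() {
        const size_t width = 64, height = 4, srcStride = 96, dstStride = 64;
        std::vector<uint8_t> src(srcStride * height, 0x80);
        std::vector<uint8_t> dst(dstStride * height, 0);
        copyPlane(src.data(), srcStride, dst.data(), dstStride, width, height);
        return 0;
    }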
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 657a05b..359df3d 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -44,6 +44,7 @@
mNativeWindow(nativeWindow),
mWidth(0),
mHeight(0),
+ mStride(0),
mCropLeft(0),
mCropTop(0),
mCropRight(0),
@@ -67,9 +68,10 @@
int32_t colorFormatNew;
CHECK(format->findInt32("color-format", &colorFormatNew));
- int32_t widthNew, heightNew;
- CHECK(format->findInt32("stride", &widthNew));
+ int32_t widthNew, heightNew, strideNew;
+ CHECK(format->findInt32("width", &widthNew));
CHECK(format->findInt32("slice-height", &heightNew));
+ CHECK(format->findInt32("stride", &strideNew));
int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew;
if (!format->findRect(
@@ -106,6 +108,7 @@
mColorFormat = static_cast<OMX_COLOR_FORMATTYPE>(colorFormatNew);
mWidth = widthNew;
mHeight = heightNew;
+ mStride = strideNew;
mCropLeft = cropLeftNew;
mCropTop = cropTopNew;
mCropRight = cropRightNew;
@@ -276,20 +279,15 @@
if (mConverter) {
mConverter->convert(
data,
- mWidth, mHeight,
+ mWidth, mHeight, mStride,
mCropLeft, mCropTop, mCropRight, mCropBottom,
dst,
- buf->stride, buf->height,
+ buf->stride, buf->height, 0,
0, 0, mCropWidth - 1, mCropHeight - 1);
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
- const uint8_t *src_y = (const uint8_t *)data;
- const uint8_t *src_u =
- (const uint8_t *)data + mWidth * mHeight;
- const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y +=mCropLeft + mCropTop * mWidth;
- src_u +=(mCropLeft + mCropTop * mWidth / 2)/2;
- src_v +=(mCropLeft + mCropTop * mWidth / 2)/2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -305,7 +303,7 @@
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
@@ -313,19 +311,15 @@
memcpy(dst_u, src_u, (mCropWidth + 1) / 2);
memcpy(dst_v, src_v, (mCropWidth + 1) / 2);
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
- const uint16_t *src_y = (const uint16_t *)data;
- const uint16_t *src_u = (const uint16_t *)data + mWidth * mHeight;
- const uint16_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
-
- src_y += mCropLeft + mCropTop * mWidth;
- src_u += (mCropLeft + mCropTop * mWidth / 2) / 2;
- src_v += (mCropLeft + mCropTop * mWidth / 2) / 2;
+ const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
+ const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
+ const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
uint8_t *dst_y = (uint8_t *)dst;
size_t dst_y_size = buf->stride * buf->height;
@@ -340,21 +334,21 @@
for (int y = 0; y < mCropHeight; ++y) {
for (int x = 0; x < mCropWidth; ++x) {
- dst_y[x] = (uint8_t)(src_y[x] >> 2);
+ dst_y[x] = (uint8_t)(((uint16_t *)src_y)[x] >> 2);
}
- src_y += mWidth;
+ src_y += mStride;
dst_y += buf->stride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
for (int x = 0; x < (mCropWidth + 1) / 2; ++x) {
- dst_u[x] = (uint8_t)(src_u[x] >> 2);
- dst_v[x] = (uint8_t)(src_v[x] >> 2);
+ dst_u[x] = (uint8_t)(((uint16_t *)src_u)[x] >> 2);
+ dst_v[x] = (uint8_t)(((uint16_t *)src_v)[x] >> 2);
}
- src_u += mWidth / 2;
- src_v += mWidth / 2;
+ src_u += mStride / 2;
+ src_v += mStride / 2;
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
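The renderer hunks above locate the chroma planes from mStride * mHeight rather than mWidth * mHeight and step chroma rows by mStride / 2. A hedged sketch of that I420 layout assumption (luma stride in bytes, chroma stride of half that, even crop offsets); the names are illustrative and not taken from the patch:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch only: base pointers for an I420 buffer whose luma rows are 'stride'
// bytes and whose chroma rows are stride / 2 bytes (assumed layout).
struct I420Planes {
    const uint8_t *y;
    const uint8_t *u;
    const uint8_t *v;
};

static I420Planes locatePlanes(const uint8_t *data, size_t stride, size_t height,
                               size_t cropLeft, size_t cropTop) {
    const uint8_t *yBase = data;
    const uint8_t *uBase = yBase + stride * height;              // after the luma plane
    const uint8_t *vBase = uBase + (stride / 2) * (height / 2);  // after the U plane
    // Chroma is subsampled 2x2, so crop offsets are halved (crop assumed even here).
    const size_t chromaOffset = (cropTop / 2) * (stride / 2) + cropLeft / 2;
    return { yBase + cropTop * stride + cropLeft,
             uBase + chromaOffset,
             vBase + chromaOffset };
}

int main() {
    std::vector<uint8_t> buffer(256 * 240 * 3 / 2, 0);
    I420Planes p = locatePlanes(buffer.data(), 256 /*stride*/, 240 /*height*/, 16, 8);
    printf("y=+%td u=+%td v=+%td\n",
           p.y - buffer.data(), p.u - buffer.data(), p.v - buffer.data());
    return 0;
}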
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 1695c75..a32cf08 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 25be89f..b165bcb 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index c286516..64dca4e 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -51,7 +51,7 @@
ColorConverter *mConverter;
YUVMode mYUVMode;
sp<ANativeWindow> mNativeWindow;
- int32_t mWidth, mHeight;
+ int32_t mWidth, mHeight, mStride;
int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 97d15a7..1137cf1 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -137,6 +137,7 @@
kWhatOMXDied = 'OMXd',
kWhatReleaseCodecInstance = 'relC',
kWhatForceStateTransition = 'fstt',
+ kWhatCheckIfStuck = 'Cstk',
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 5b3543d..2b8c7c8 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -37,11 +37,11 @@
status_t convert(
const void *srcBits,
- size_t srcWidth, size_t srcHeight,
+ size_t srcWidth, size_t srcHeight, size_t srcStride,
size_t srcCropLeft, size_t srcCropTop,
size_t srcCropRight, size_t srcCropBottom,
void *dstBits,
- size_t dstWidth, size_t dstHeight,
+ size_t dstWidth, size_t dstHeight, size_t dstStride,
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom);
@@ -49,7 +49,7 @@
struct BitmapParams {
BitmapParams(
void *bits,
- size_t width, size_t height,
+ size_t width, size_t height, size_t stride,
size_t cropLeft, size_t cropTop,
size_t cropRight, size_t cropBottom,
OMX_COLOR_FORMATTYPE colorFromat);
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index fb9f5bd..d5f4b35 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -22,7 +22,6 @@
#include <media/IMediaExtractor.h>
#include <media/MediaExtractor.h>
-#include <utils/List.h>
namespace android {
@@ -40,15 +39,15 @@
private:
static Mutex gPluginMutex;
- static std::shared_ptr<List<sp<ExtractorPlugin>>> gPlugins;
+ static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
static bool gPluginsRegistered;
static void RegisterExtractorsInApk(
- const char *apkPath, List<sp<ExtractorPlugin>> &pluginList);
+ const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractorsInSystem(
- const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList);
+ const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractor(
- const sp<ExtractorPlugin> &plugin, List<sp<ExtractorPlugin>> &pluginList);
+ const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
static MediaExtractor::CreatorFunc sniff(DataSourceBase *source,
float *confidence, void **meta, MediaExtractor::FreeMetaFunc *freeMeta,
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5cc5093..cc31815 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -119,6 +119,7 @@
private:
struct StreamInfo {
unsigned mType;
+ unsigned mTypeExt;
unsigned mPID;
int32_t mCASystemId;
};
@@ -145,10 +146,12 @@
Stream(Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID);
unsigned type() const { return mStreamType; }
+ unsigned typeExt() const { return mStreamTypeExt; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
@@ -194,6 +197,7 @@
Program *mProgram;
unsigned mElementaryPID;
unsigned mStreamType;
+ unsigned mStreamTypeExt;
unsigned mPCR_PID;
int32_t mExpectedContinuityCounter;
@@ -447,7 +451,7 @@
if (descriptor_length > infoLength) {
break;
}
- if (descriptor_tag == 9 && descriptor_length >= 4) {
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
found = true;
caDescriptor->mSystemID = br->getBits(16);
caDescriptor->mPID = br->getBits(16) & 0x1fff;
@@ -513,37 +517,65 @@
// infoBytesRemaining is the number of bytes that make up the
// variable length section of ES_infos. It does not include the
// final CRC.
- size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
+ int32_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
while (infoBytesRemaining >= 5) {
-
- unsigned streamType = br->getBits(8);
- ALOGV(" stream_type = 0x%02x", streamType);
-
+ StreamInfo info;
+ info.mType = br->getBits(8);
+ ALOGV(" stream_type = 0x%02x", info.mType);
MY_LOGV(" reserved = %u", br->getBits(3));
- unsigned elementaryPID = br->getBits(13);
- ALOGV(" elementary_PID = 0x%04x", elementaryPID);
+ info.mPID = br->getBits(13);
+ ALOGV(" elementary_PID = 0x%04x", info.mPID);
MY_LOGV(" reserved = %u", br->getBits(4));
unsigned ES_info_length = br->getBits(12);
ALOGV(" ES_info_length = %u", ES_info_length);
+ infoBytesRemaining -= 5 + ES_info_length;
CADescriptor streamCA;
- bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_RESERVED_MAX;
+ bool hasStreamCA = false;
+ while (ES_info_length > 2 && infoBytesRemaining >= 0) {
+ unsigned descriptor_tag = br->getBits(8);
+ ALOGV(" tag = 0x%02x", descriptor_tag);
+
+ unsigned descriptor_length = br->getBits(8);
+ ALOGV(" len = %u", descriptor_length);
+
+ ES_info_length -= 2;
+ if (descriptor_length > ES_info_length) {
+ return ERROR_MALFORMED;
+ }
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+ hasStreamCA = true;
+ streamCA.mSystemID = br->getBits(16);
+ streamCA.mPID = br->getBits(16) & 0x1fff;
+ ES_info_length -= 4;
+ streamCA.mPrivateData.assign(br->data(), br->data() + descriptor_length - 4);
+ } else if (info.mType == STREAMTYPE_PES_PRIVATE_DATA &&
+ descriptor_tag == DESCRIPTOR_DVB_EXTENSION && descriptor_length >= 1) {
+ unsigned descTagExt = br->getBits(8);
+ ALOGV(" tag_ext = 0x%02x", descTagExt);
+ if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
+ }
+ ES_info_length -= descriptor_length;
+ descriptor_length--;
+ br->skipBits(descriptor_length * 8);
+ } else {
+ ES_info_length -= descriptor_length;
+ br->skipBits(descriptor_length * 8);
+ }
+ }
if (hasStreamCA && !mParser->mCasManager->addStream(
- mProgramNumber, elementaryPID, streamCA)) {
+ mProgramNumber, info.mPID, streamCA)) {
return ERROR_MALFORMED;
}
- StreamInfo info;
- info.mType = streamType;
- info.mPID = elementaryPID;
info.mCASystemId = hasProgramCA ? programCA.mSystemID :
hasStreamCA ? streamCA.mSystemID : -1;
infos.push(info);
-
- infoBytesRemaining -= 5 + ES_info_length;
}
if (infoBytesRemaining != 0) {
@@ -602,7 +634,7 @@
if (index < 0) {
sp<Stream> stream = new Stream(
- this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+ this, info.mPID, info.mType, info.mTypeExt, PCR_PID, info.mCASystemId);
if (mSampleAesKeyItem != NULL) {
stream->signalNewSampleAesKey(mSampleAesKeyItem);
@@ -720,11 +752,13 @@
Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID)
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
+ mStreamTypeExt(streamTypeExt),
mPCR_PID(PCR_PID),
mExpectedContinuityCounter(-1),
mPayloadStarted(false),
@@ -781,6 +815,12 @@
mode = ElementaryStreamQueue::AC3;
break;
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
+ mode = ElementaryStreamQueue::AC4;
+ }
+ break;
+
case STREAMTYPE_METADATA:
mode = ElementaryStreamQueue::METADATA;
break;
@@ -989,6 +1029,8 @@
case STREAMTYPE_AAC_ENCRYPTED:
case STREAMTYPE_AC3_ENCRYPTED:
return true;
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
default:
return false;
@@ -1395,7 +1437,7 @@
// Perform the 1st pass descrambling if needed
if (descrambleBytes > 0) {
memcpy(mDescrambledBuffer->data(), mBuffer->data(), descrambleBytes);
- mDescrambledBuffer->setRange(0, descrambleBytes);
+ mDescrambledBuffer->setRange(0, mBuffer->size());
hidl_vec<SubSample> subSamples;
subSamples.resize(descrambleSubSamples);
@@ -1412,10 +1454,9 @@
}
}
- uint64_t srcOffset = 0, dstOffset = 0;
- // If scrambled at PES-level, PES header should be skipped
+ // If scrambled at PES-level, PES header is in the clear
if (pesScramblingControl != 0) {
- srcOffset = dstOffset = pesOffset;
+ subSamples[0].numBytesOfClearData = pesOffset;
subSamples[0].numBytesOfEncryptedData -= pesOffset;
}
@@ -1431,9 +1472,9 @@
(ScramblingControl) sctrl,
subSamples,
mDescramblerSrcBuffer,
- srcOffset,
+ 0 /*srcOffset*/,
dstBuffer,
- dstOffset,
+ 0 /*dstOffset*/,
[&status, &bytesWritten, &detailedError] (
Status _status, uint32_t _bytesWritten,
const hidl_string& _detailedError) {
@@ -1450,9 +1491,15 @@
ALOGV("[stream %d] descramble succeeded, %d bytes",
mElementaryPID, bytesWritten);
- memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+ // Set descrambleBytes to the returned result.
+ // Note that this might be smaller than the total length of input data.
+ // (eg. when we're descrambling the PES header portion of a secure stream,
+ // the plugin might cut it off right after the PES header.)
+ descrambleBytes = bytesWritten;
}
+ sp<ABuffer> buffer;
if (mQueue->isScrambled()) {
// Queue subSample info for scrambled queue
sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
@@ -1464,8 +1511,7 @@
for (auto it = mSubSamples.begin();
it != mSubSamples.end(); it++, i++) {
if ((it->transport_scrambling_mode == 0
- && pesScramblingControl == 0)
- || i < descrambleSubSamples) {
+ && pesScramblingControl == 0)) {
clearSizePtr[i] = it->subSampleSize;
encSizePtr[i] = 0;
} else {
@@ -1474,14 +1520,26 @@
}
isSync |= it->random_access_indicator;
}
+
+ // If scrambled at PES-level, PES header is in the clear
+ if (pesScramblingControl != 0) {
+ clearSizePtr[0] = pesOffset;
+ encSizePtr[0] -= pesOffset;
+ }
// Pass the original TS subsample size now. The PES header adjust
// will be applied when the scrambled AU is dequeued.
mQueue->appendScrambledData(
mBuffer->data(), mBuffer->size(), sctrl,
isSync, clearSizesBuffer, encSizesBuffer);
+
+ buffer = mDescrambledBuffer;
+ } else {
+ memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+
+ buffer = mBuffer;
}
- ABitReader br(mBuffer->data(), mBuffer->size());
+ ABitReader br(buffer->data(), buffer->size());
status_t err = parsePES(&br, event);
if (err != OK) {
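The PMT change above inlines the ES_info descriptor walk so that, besides the CA descriptor (tag 0x09), it can also spot the DVB extension descriptor (tag 0x7F) whose first payload byte 0x15 marks AC-4 on a stream_type 0x06 (PES private data) stream. A reduced, hedged sketch of that tag/length walk over a raw descriptor span; the function name and return type are illustrative, the tag values come from the enums added in this patch:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch only: walk an ES_info descriptor loop (tag, length, payload) and report
// whether a DVB extension descriptor (0x7F) carrying the AC-4 tag (0x15) is present.
static bool hasDvbAc4Descriptor(const uint8_t *data, size_t len) {
    size_t off = 0;
    while (off + 2 <= len) {
        uint8_t tag = data[off];
        uint8_t descLen = data[off + 1];
        off += 2;
        if (off + descLen > len) {
            break;  // malformed: descriptor runs past ES_info_length
        }
        if (tag == 0x7F /*extension*/ && descLen >= 1 && data[off] == 0x15 /*AC-4*/) {
            return true;
        }
        off += descLen;
    }
    return false;
}

int main() {
    // A CA descriptor (tag 0x09) followed by a DVB extension descriptor tagged AC-4.
    const uint8_t esInfo[] = { 0x09, 0x04, 0x06, 0x05, 0xE0, 0x21,
                               0x7F, 0x01, 0x15 };
    printf("AC-4 signalled: %s\n", hasDvbAc4Descriptor(esInfo, sizeof(esInfo)) ? "yes" : "no");
    return 0;
}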
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 45ca06b..adb4fb2 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -142,6 +142,7 @@
STREAMTYPE_MPEG2_VIDEO = 0x02,
STREAMTYPE_MPEG1_AUDIO = 0x03,
STREAMTYPE_MPEG2_AUDIO = 0x04,
+ STREAMTYPE_PES_PRIVATE_DATA = 0x06,
STREAMTYPE_MPEG2_AUDIO_ADTS = 0x0f,
STREAMTYPE_MPEG4_VIDEO = 0x10,
STREAMTYPE_METADATA = 0x15,
@@ -160,6 +161,20 @@
STREAMTYPE_AC3_ENCRYPTED = 0xC1,
};
+ enum {
+ // From ISO/IEC 13818-1: 2007 (E), Table 2-29
+ DESCRIPTOR_CA = 0x09,
+
+ // DVB BlueBook A038 Table 12
+ DESCRIPTOR_DVB_EXTENSION = 0x7F,
+ };
+
+ // DVB BlueBook A038 Table 109
+ enum {
+ EXT_DESCRIPTOR_DVB_AC4 = 0x15,
+ EXT_DESCRIPTOR_DVB_RESERVED_MAX = 0x7F,
+ };
+
protected:
virtual ~ATSParser();
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index ece0692..9e154a3 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -226,6 +226,7 @@
int32_t cryptoMode;
if (buffer->meta()->findInt32("cryptoMode", &cryptoMode)) {
int32_t cryptoKey;
+ int32_t pesOffset;
sp<ABuffer> clearBytesBuffer, encBytesBuffer;
CHECK(buffer->meta()->findInt32("cryptoKey", &cryptoKey));
@@ -233,6 +234,8 @@
&& clearBytesBuffer != NULL);
CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
&& encBytesBuffer != NULL);
+ CHECK(buffer->meta()->findInt32("pesOffset", &pesOffset)
+ && (pesOffset >= 0) && (pesOffset < 65536));
bufmeta.setInt32(kKeyCryptoMode, cryptoMode);
@@ -240,6 +243,11 @@
bufmeta.setData(kKeyCryptoIV, 0, array, 16);
array[0] = (uint8_t) (cryptoKey & 0xff);
+ // array[1] contains PES header flag, which we don't use.
+ // array[2~3] contain the PES offset.
+ array[2] = (uint8_t) (pesOffset & 0xff);
+ array[3] = (uint8_t) ((pesOffset >> 8) & 0xff);
+
bufmeta.setData(kKeyCryptoKey, 0, array, 16);
bufmeta.setData(kKeyPlainSizes, 0,
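The hunk above packs the PES offset into bytes 2 and 3 of the 16-byte crypto-key blob, low byte first, after checking that it fits in 16 bits. A tiny hedged sketch of that pack/unpack convention, with illustrative names rather than the framework's metadata keys:

#include <cstdint>
#include <cstdio>

// Sketch only: store a 16-bit PES offset in bytes [2] and [3] of a key blob,
// low byte first, and read it back.
static void packPesOffset(uint8_t key[16], uint32_t pesOffset) {
    key[2] = static_cast<uint8_t>(pesOffset & 0xff);
    key[3] = static_cast<uint8_t>((pesOffset >> 8) & 0xff);
}

static uint32_t unpackPesOffset(const uint8_t key[16]) {
    return static_cast<uint32_t>(key[2]) | (static_cast<uint32_t>(key[3]) << 8);
}

int main() {
    uint8_t key[16] = {};
    packPesOffset(key, 0x01A9);          // a 425-byte PES header, for example
    printf("pesOffset = %u\n", unpackPesOffset(key));
    return 0;
}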
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 0fa9fcb..34d0bcc 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -86,6 +86,21 @@
mCasSessionId = sessionId;
}
+static int32_t readVariableBits(ABitReader &bits, int32_t nbits) {
+ int32_t value = 0;
+ int32_t more_bits = 1;
+
+ while (more_bits) {
+ value += bits.getBits(nbits);
+ more_bits = bits.getBits(1);
+ if (!more_bits)
+ break;
+ value++;
+ value <<= nbits;
+ }
+ return value;
+}
+
// Parse AC3 header assuming the current ptr is start position of syncframe,
// update metadata only applicable, and return the payload size
static unsigned parseAC3SyncFrame(
@@ -199,6 +214,78 @@
return parseAC3SyncFrame(ptr, size, NULL) > 0;
}
+// Parse AC4 header assuming the current ptr is start position of syncframe
+// and update frameSize and metadata.
+static status_t parseAC4SyncFrame(
+ const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+ // ETSI TS 103 190-2 V1.1.1 (2015-09), Annex C
+ // The sync_word can be either 0xAC40 or 0xAC41.
+ static const int kSyncWordAC40 = 0xAC40;
+ static const int kSyncWordAC41 = 0xAC41;
+
+ size_t headerSize = 0;
+ ABitReader bits(ptr, size);
+ int32_t syncWord = bits.getBits(16);
+ if ((syncWord != kSyncWordAC40) && (syncWord != kSyncWordAC41)) {
+ ALOGE("Invalid syncword in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ headerSize += 2;
+
+ frameSize = bits.getBits(16);
+ headerSize += 2;
+ if (frameSize == 0xFFFF) {
+ frameSize = bits.getBits(24);
+ headerSize += 3;
+ }
+
+ if (frameSize == 0) {
+ ALOGE("Invalid frame size in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ frameSize += headerSize;
+ // If the sync_word is 0xAC41, a crc_word is also transmitted.
+ if (syncWord == kSyncWordAC41) {
+ frameSize += 2; // crc_word
+ }
+ ALOGV("AC4 frameSize = %u", frameSize);
+
+ // ETSI TS 103 190-2 V1.1.1 6.2.1.1
+ uint32_t bitstreamVersion = bits.getBits(2);
+ if (bitstreamVersion == 3) {
+ bitstreamVersion += readVariableBits(bits, 2);
+ }
+
+ bits.skipBits(10); // Sequence Counter
+
+ uint32_t bWaitFrames = bits.getBits(1);
+ if (bWaitFrames) {
+ uint32_t waitFrames = bits.getBits(3);
+ if (waitFrames > 0) {
+ bits.skipBits(2); // br_code;
+ }
+ }
+
+ // ETSI TS 103 190 V1.1.1 Table 82
+ bool fsIndex = bits.getBits(1);
+ uint32_t samplingRate = fsIndex ? 48000 : 44100;
+
+ if (metaData != NULL) {
+ ALOGV("dequeueAccessUnitAC4 Setting mFormat");
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+ // [FIXME] AC4 channel count is defined per presentation. Provide a default channel count
+ // as stereo for the entire stream.
+ (*metaData)->setInt32(kKeyChannelCount, 2);
+ (*metaData)->setInt32(kKeySampleRate, samplingRate);
+ }
+ return OK;
+}
+
+static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
+ return parseAC4SyncFrame(ptr, size, frameSize, NULL);
+}
+
static bool IsSeeminglyValidADTSHeader(
const uint8_t *ptr, size_t size, size_t *frameLength) {
if (size < 7) {
@@ -416,6 +503,42 @@
break;
}
+ case AC4:
+ {
+ uint8_t *ptr = (uint8_t *)data;
+ unsigned frameSize = 0;
+ ssize_t startOffset = -1;
+
+ // A valid AC4 stream should have minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (size < 7) {
+ return ERROR_MALFORMED;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (IsSeeminglyValidAC4Header(&ptr[i], size - i, frameSize) == OK) {
+ startOffset = i;
+ break;
+ }
+ }
+
+ if (startOffset < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ if (startOffset > 0) {
+ ALOGI("found something resembling an AC4 syncword at offset %zd",
+ startOffset);
+ }
+ if (frameSize != size - startOffset) {
+ ALOGV("AC4 frame size is %u bytes, while the buffer size is %zd bytes.",
+ frameSize, size - startOffset);
+ }
+
+ data = &ptr[startOffset];
+ size -= startOffset;
+ break;
+ }
+
case MPEG_AUDIO:
{
uint8_t *ptr = (uint8_t *)data;
@@ -568,25 +691,9 @@
return NULL;
}
- // skip the PES header, and copy the rest into scrambled access unit
+ // copy into scrambled access unit
sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
- mScrambledBuffer->data() + pesOffset,
- scrambledLength - pesOffset);
-
- // fix up first sample size after skipping the PES header
- if (pesOffset > 0) {
- int32_t &firstClearSize = *(int32_t*)clearSizes->data();
- int32_t &firstEncSize = *(int32_t*)encSizes->data();
- // Cut away the PES header
- if (firstClearSize >= pesOffset) {
- // This is for TS-level scrambling, we descrambled the first
- // (or it was clear to begin with)
- firstClearSize -= pesOffset;
- } else if (firstEncSize >= pesOffset) {
- // This can only be PES-level scrambling
- firstEncSize -= pesOffset;
- }
- }
+ mScrambledBuffer->data(), scrambledLength);
scrambledAccessUnit->meta()->setInt64("timeUs", timeUs);
if (isSync) {
@@ -600,6 +707,7 @@
scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
+ scrambledAccessUnit->meta()->setInt32("pesOffset", pesOffset);
memmove(mScrambledBuffer->data(),
mScrambledBuffer->data() + scrambledLength,
@@ -649,6 +757,8 @@
return dequeueAccessUnitAAC();
case AC3:
return dequeueAccessUnitAC3();
+ case AC4:
+ return dequeueAccessUnitAC4();
case MPEG_VIDEO:
return dequeueAccessUnitMPEGVideo();
case MPEG4_VIDEO:
@@ -730,6 +840,69 @@
return accessUnit;
}
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC4() {
+ unsigned syncStartPos = 0;
+ unsigned payloadSize = 0;
+ sp<MetaData> format = new MetaData;
+ ALOGV("dequeueAccessUnit_AC4[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
+ // A valid AC4 stream should have minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (mBuffer->size() < 7) {
+ return NULL;
+ }
+
+ while (true) {
+ if (syncStartPos + 2 >= mBuffer->size()) {
+ return NULL;
+ }
+
+ status_t status = parseAC4SyncFrame(
+ mBuffer->data() + syncStartPos,
+ mBuffer->size() - syncStartPos,
+ payloadSize,
+ &format);
+ if (status == OK) {
+ break;
+ }
+
+ ALOGV("dequeueAccessUnit_AC4[%d]: syncStartPos %u payloadSize %u",
+ mAUIndex, syncStartPos, payloadSize);
+
+ ++syncStartPos;
+ }
+
+ if (mBuffer->size() < syncStartPos + payloadSize) {
+ ALOGV("Not enough buffer size for AC4");
+ return NULL;
+ }
+
+ if (mFormat == NULL) {
+ mFormat = format;
+ }
+
+ int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+ if (timeUs < 0ll) {
+ ALOGE("negative timeUs");
+ return NULL;
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+ accessUnit->meta()->setInt32("isSync", 1);
+
+ memmove(
+ mBuffer->data(),
+ mBuffer->data() + syncStartPos + payloadSize,
+ mBuffer->size() - syncStartPos - payloadSize);
+
+ mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+ return accessUnit;
+}
+
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
if (mBuffer->size() < 4) {
return NULL;
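parseAC4SyncFrame above derives the total syncframe length from the 16-bit frame_size field, escaping to a 24-bit size when the field is 0xFFFF and adding a 2-byte CRC when the sync word is 0xAC41. A hedged standalone sketch of just that length computation over raw bytes (no bit reader, illustrative names), following the same rules as the patch:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch only: total AC-4 sync frame length from the first bytes of a syncframe.
// Returns 0 on anything malformed.
static size_t ac4SyncFrameLength(const uint8_t *ptr, size_t size) {
    if (size < 7) {  // sync header (4) + minimum TOC (3)
        return 0;
    }
    uint32_t syncWord = (ptr[0] << 8) | ptr[1];
    if (syncWord != 0xAC40 && syncWord != 0xAC41) {
        return 0;
    }
    size_t headerSize = 2;
    uint32_t frameSize = (ptr[2] << 8) | ptr[3];
    headerSize += 2;
    if (frameSize == 0xFFFF) {           // escape: real size is the next 24 bits
        frameSize = (ptr[4] << 16) | (ptr[5] << 8) | ptr[6];
        headerSize += 3;
    }
    if (frameSize == 0) {
        return 0;
    }
    size_t total = frameSize + headerSize;
    if (syncWord == 0xAC41) {
        total += 2;                       // crc_word follows the frame payload
    }
    return total;
}

int main() {
    const uint8_t frame[] = { 0xAC, 0x41, 0x00, 0x20, 0x00, 0x00, 0x00 };
    printf("sync frame length = %zu bytes\n", ac4SyncFrameLength(frame, sizeof(frame)));
    return 0;
}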
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index ffcb502..399214a 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -38,6 +38,7 @@
H264,
AAC,
AC3,
+ AC4,
MPEG_AUDIO,
MPEG_VIDEO,
MPEG4_VIDEO,
@@ -116,6 +117,7 @@
sp<ABuffer> dequeueAccessUnitH264();
sp<ABuffer> dequeueAccessUnitAAC();
sp<ABuffer> dequeueAccessUnitAC3();
+ sp<ABuffer> dequeueAccessUnitAC4();
sp<ABuffer> dequeueAccessUnitMPEGAudio();
sp<ABuffer> dequeueAccessUnitMPEGVideo();
sp<ABuffer> dequeueAccessUnitMPEG4Video();
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 935dc34..672a37c 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -154,12 +154,12 @@
outDef->format.video.nFrameWidth = outputBufferWidth();
outDef->format.video.nFrameHeight = outputBufferHeight();
outDef->format.video.eColorFormat = mOutputFormat;
- outDef->format.video.nStride = outDef->format.video.nFrameWidth;
outDef->format.video.nSliceHeight = outDef->format.video.nFrameHeight;
int32_t bpp = (mOutputFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
+ outDef->format.video.nStride = outDef->format.video.nFrameWidth * bpp;
outDef->nBufferSize =
- (outDef->format.video.nStride * outDef->format.video.nSliceHeight * bpp * 3) / 2;
+ (outDef->format.video.nStride * outDef->format.video.nSliceHeight * 3) / 2;
OMX_PARAM_PORTDEFINITIONTYPE *inDef = &editPortInfo(kInputPortIndex)->mDef;
inDef->format.video.nFrameWidth = mWidth;
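After the change above, nStride is expressed in bytes (frame width times bytes per pixel) and the buffer-size formula drops its separate bpp factor, so the two computations stay consistent. A quick hedged arithmetic check for 8-bit and 16-bit planar 4:2:0 output at 1920x1088 (illustrative function name):

#include <cstdint>
#include <cstdio>

// Sketch only: buffer size for a planar YUV 4:2:0 output where the stride is in bytes.
static uint32_t outputBufferSize(uint32_t width, uint32_t sliceHeight, uint32_t bpp) {
    uint32_t stride = width * bpp;            // stride now carries the bpp factor
    return (stride * sliceHeight * 3) / 2;    // Y plane plus quarter-size U and V planes
}

int main() {
    printf("8-bit  1920x1088: %u bytes\n", outputBufferSize(1920, 1088, 1));  // 3133440
    printf("16-bit 1920x1088: %u bytes\n", outputBufferSize(1920, 1088, 2));  // 6266880
    return 0;
}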
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6d10f1c..5597488 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -309,6 +309,7 @@
}
String8 defaultUrl;
DrmPlugin::KeyRequestType keyRequestType;
+ mObj->mKeyRequest.clear();
status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
&keyRequestType);
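The one-line fix above clears mKeyRequest before handing it to getKeyRequest, so a stale request from an earlier call cannot survive if the new call appends to, or fails to fully overwrite, the vector. A generic hedged sketch of that failure mode with illustrative types, not the NDK API:

#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch only: an API that appends its result into a caller-provided buffer.
static void produceRequest(std::vector<uint8_t> &out) {
    const uint8_t payload[] = { 0xDE, 0xAD, 0xBE, 0xEF };
    out.insert(out.end(), payload, payload + sizeof(payload));
}

int main() {
    std::vector<uint8_t> request;
    produceRequest(request);
    request.clear();          // without this, the second call would leave 8 bytes behind
    produceRequest(request);
    printf("request size = %zu\n", request.size());  // 4
    return 0;
}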
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index d6dae5b..de8e46a 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -21,9 +21,11 @@
"MemoryLeakTrackUtil.cpp",
"ProcessInfo.cpp",
"SchedulingPolicyService.cpp",
+ "ServiceUtilities.cpp",
],
shared_libs: [
"libbinder",
+ "libcutils",
"liblog",
"libutils",
"libmemunreachable",
diff --git a/services/audioflinger/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
similarity index 84%
rename from services/audioflinger/ServiceUtilities.cpp
rename to media/utils/ServiceUtilities.cpp
index aa267ea..0d50be0 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -14,12 +14,13 @@
* limitations under the License.
*/
+#define LOG_TAG "ServiceUtilities"
+
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/PermissionCache.h>
-#include <private/android_filesystem_config.h>
-#include "ServiceUtilities.h"
+#include "mediautils/ServiceUtilities.h"
/* When performing permission checks we do not use permission cache for
* runtime permissions (protection level dangerous) as they may change at
@@ -32,24 +33,6 @@
static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
-// Not valid until initialized by AudioFlinger constructor. It would have to be
-// re-initialized if the process containing AudioFlinger service forks (which it doesn't).
-// This is often used to validate binder interface calls within audioserver
-// (e.g. AudioPolicyManager to AudioFlinger).
-pid_t getpid_cached;
-
-// A trusted calling UID may specify the client UID as part of a binder interface call.
-// otherwise the calling UID must be equal to the client UID.
-bool isTrustedCallingUid(uid_t uid) {
- switch (uid) {
- case AID_MEDIA:
- case AID_AUDIOSERVER:
- return true;
- default:
- return false;
- }
-}
-
static String16 resolveCallingPackage(PermissionController& permissionController,
const String16& opPackageName, uid_t uid) {
if (opPackageName.size() > 0) {
@@ -71,16 +54,11 @@
return packages[0];
}
-static inline bool isAudioServerOrRoot(uid_t uid) {
- // AID_ROOT is OK for command-line tests. Native unforked audioserver always OK.
- return uid == AID_ROOT || uid == AID_AUDIOSERVER ;
-}
-
static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
uid_t uid, bool start) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
- if (isAudioServerOrRoot(uid)) return true;
+ if (isAudioServerOrRootUid(uid)) return true;
// We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
// may open a record track on behalf of a client. Note that pid may be a tid.
@@ -127,7 +105,7 @@
void finishRecording(const String16& opPackageName, uid_t uid) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
- if (isAudioServerOrRoot(uid)) return;
+ if (isAudioServerOrRootUid(uid)) return;
PermissionController permissionController;
String16 resolvedOpPackageName = resolveCallingPackage(
@@ -142,7 +120,7 @@
}
bool captureAudioOutputAllowed(pid_t pid, uid_t uid) {
- if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+ if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
@@ -163,7 +141,8 @@
}
bool settingsAllowed() {
- if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+ // given this is a permission check, could this be isAudioServerOrRootUid()?
+ if (isAudioServerUid(IPCThreadState::self()->getCallingUid())) return true;
static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
bool ok = PermissionCache::checkCallingPermission(sAudioSettings);
@@ -180,7 +159,6 @@
}
bool dumpAllowed() {
- // don't optimize for same pid, since mediaserver never dumps itself
static const String16 sDump("android.permission.DUMP");
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
bool ok = PermissionCache::checkCallingPermission(sDump);
@@ -196,4 +174,29 @@
return ok;
}
+status_t checkIMemory(const sp<IMemory>& iMemory)
+{
+ if (iMemory == 0) {
+ ALOGE("%s check failed: NULL IMemory pointer", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ sp<IMemoryHeap> heap = iMemory->getMemory();
+ if (heap == 0) {
+ ALOGE("%s check failed: NULL heap pointer", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ off_t size = lseek(heap->getHeapID(), 0, SEEK_END);
+ lseek(heap->getHeapID(), 0, SEEK_SET);
+
+ if (iMemory->pointer() == NULL || size < (off_t)iMemory->size()) {
+ ALOGE("%s check failed: pointer %p size %zu fd size %u",
+ __FUNCTION__, iMemory->pointer(), iMemory->size(), (uint32_t)size);
+ return BAD_VALUE;
+ }
+
+ return NO_ERROR;
+}
+
} // namespace android
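checkIMemory above validates a shared-memory region by comparing the client-visible size with the size of the backing heap fd, obtained by seeking to the end and then restoring the offset. A hedged standalone sketch of that fd-size check using plain POSIX calls and illustrative names:

#include <cstdio>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

// Sketch only: reject a claimed region size that exceeds what the backing fd holds.
static bool regionFitsInFd(int fd, size_t claimedSize) {
    off_t end = lseek(fd, 0, SEEK_END);   // size of the backing file / ashmem region
    lseek(fd, 0, SEEK_SET);               // restore the offset, as the patch does
    if (end < 0) {
        return false;
    }
    return claimedSize <= static_cast<size_t>(end);
}

int main() {
    int fd = open("/etc/hostname", O_RDONLY);  // any readable file works for the demo
    if (fd < 0) {
        perror("open");
        return 1;
    }
    printf("fits: %d\n", regionFitsInFd(fd, 1));
    close(fd);
    return 0;
}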
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
new file mode 100644
index 0000000..0911744
--- /dev/null
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include <binder/IMemory.h>
+#include <binder/PermissionController.h>
+#include <cutils/multiuser.h>
+#include <private/android_filesystem_config.h>
+
+namespace android {
+
+// Audio permission utilities
+
+// Used for calls that should originate from system services.
+// We allow that some services might have separate processes to
+// handle multiple users, e.g. u10_system, u10_bluetooth, u10_radio.
+static inline bool isServiceUid(uid_t uid) {
+ return multiuser_get_app_id(uid) < AID_APP_START;
+}
+
+// Used for calls that should originate from audioserver.
+static inline bool isAudioServerUid(uid_t uid) {
+ return uid == AID_AUDIOSERVER;
+}
+
+// Used for some permission checks.
+// AID_ROOT is OK for command-line tests. Native audioserver always OK.
+static inline bool isAudioServerOrRootUid(uid_t uid) {
+ return uid == AID_AUDIOSERVER || uid == AID_ROOT;
+}
+
+// Used for calls that should come from system server or internal.
+// Note: system server is multiprocess for multiple users. audioserver is not.
+static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
+ return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
+}
+
+// Mediaserver may forward the client PID and UID as part of a binder interface call;
+// otherwise the calling UID must be equal to the client UID.
+static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
+ switch (uid) {
+ case AID_MEDIA:
+ case AID_AUDIOSERVER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
+void finishRecording(const String16& opPackageName, uid_t uid);
+bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
+bool captureHotwordAllowed(pid_t pid, uid_t uid);
+bool settingsAllowed();
+bool modifyAudioRoutingAllowed();
+bool dumpAllowed();
+bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
+status_t checkIMemory(const sp<IMemory>& iMemory);
+}
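The new header above keys most checks on the app-id portion of the uid, so per-user service uids (u10_system, u10_bluetooth, and so on) still count as service uids, while the audioserver check compares the raw uid. A hedged sketch of that mapping; the 100000 per-user offset and the AID values are assumptions about the platform constants (cutils/multiuser.h, private/android_filesystem_config.h), not taken from this patch:

#include <cstdint>
#include <cstdio>

// Sketch only. Assumed platform constants:
static const uint32_t kUserOffset = 100000;  // AID_USER_OFFSET (assumed)
static const uint32_t kAppStart = 10000;     // AID_APP_START (assumed)
static const uint32_t kAudioServer = 1041;   // AID_AUDIOSERVER (assumed)

static uint32_t appId(uint32_t uid) { return uid % kUserOffset; }

static bool isServiceUid(uint32_t uid) { return appId(uid) < kAppStart; }
static bool isAudioServerUid(uint32_t uid) { return uid == kAudioServer; }

int main() {
    uint32_t u10Audioserver = 10 * kUserOffset + kAudioServer;  // 1001041
    printf("u10 audioserver: service=%d audioserver=%d\n",
           isServiceUid(u10Audioserver), isAudioServerUid(u10Audioserver));
    return 0;
}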
diff --git a/packages/MediaComponents/Android.mk b/packages/MediaComponents/Android.mk
index def9dc5..55a5424 100644
--- a/packages/MediaComponents/Android.mk
+++ b/packages/MediaComponents/Android.mk
@@ -42,7 +42,7 @@
#
#LOCAL_MULTILIB := first
#
-#LOCAL_JAVA_LIBRARIES += android-support-annotations
+#LOCAL_JAVA_LIBRARIES += androidx.annotation_annotation
#
## To embed native libraries in package, uncomment the lines below.
##LOCAL_MODULE_TAGS := samples
@@ -60,9 +60,9 @@
#
## TODO: Remove dependency with other support libraries.
#LOCAL_STATIC_ANDROID_LIBRARIES += \
-# android-support-v4 \
-# android-support-v7-appcompat \
-# android-support-v7-palette
+# androidx.legacy_legacy-support-v4 \
+# androidx.appcompat_appcompat \
+# androidx.palette_palette
#LOCAL_USE_AAPT2 := true
#
#include $(BUILD_PACKAGE)
diff --git a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
index b304471..f6f7be5 100644
--- a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
@@ -169,7 +169,7 @@
android:layout_height="wrap_content"
android:fillViewport="true"
android:scrollIndicators="top|bottom">
- <android.support.v7.widget.ButtonBarLayout
+ <androidx.appcompat.widget.ButtonBarLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:gravity="bottom"
@@ -184,7 +184,7 @@
style="?android:attr/buttonBarNeutralButtonStyle"
android:layout_width="wrap_content"
android:layout_height="wrap_content"/>
- <android.support.v4.widget.Space
+ <androidx.legacy.widget.Space
android:id="@+id/spacer"
android:layout_width="0dp"
android:layout_height="0dp"
@@ -200,7 +200,7 @@
style="?android:attr/buttonBarPositiveButtonStyle"
android:layout_width="wrap_content"
android:layout_height="wrap_content"/>
- </android.support.v7.widget.ButtonBarLayout>
+ </androidx.appcompat.widget.ButtonBarLayout>
</ScrollView>
</LinearLayout>
</FrameLayout>
diff --git a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
index a89058b..12d85ae 100644
--- a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
+++ b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
@@ -40,7 +40,7 @@
android:layout_marginBottom="8dp"
android:scaleType="fitCenter"
android:src="?attr/mediaRouteAudioTrackDrawable" />
- <android.support.v7.app.MediaRouteVolumeSlider
+ <androidx.mediarouter.app.MediaRouteVolumeSlider
android:id="@+id/mr_volume_slider"
android:layout_width="fill_parent"
android:layout_height="40dp"
diff --git a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
index c909099..0327beb 100644
--- a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
@@ -19,7 +19,6 @@
import android.content.Context;
import android.media.MediaBrowser2;
import android.media.MediaBrowser2.BrowserCallback;
-import android.media.MediaController2;
import android.media.MediaItem2;
import android.media.SessionToken2;
import android.media.update.MediaBrowser2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
index 249365a..2883087 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
@@ -16,7 +16,6 @@
package com.android.media;
-import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_ADD_ITEM;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REMOVE_ITEM;
import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REPLACE_ITEM;
@@ -30,6 +29,7 @@
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID;
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH;
import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_URI;
+import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
import android.app.PendingIntent;
import android.content.ComponentName;
@@ -44,11 +44,11 @@
import android.media.MediaMetadata2;
import android.media.MediaPlaylistAgent.RepeatMode;
import android.media.MediaPlaylistAgent.ShuffleMode;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSessionService2;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.update.MediaController2Provider;
import android.net.Uri;
@@ -58,10 +58,11 @@
import android.os.RemoteException;
import android.os.ResultReceiver;
import android.os.UserHandle;
-import android.support.annotation.GuardedBy;
import android.text.TextUtils;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executor;
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
index 2cfc5df..ece4a00 100644
--- a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
@@ -21,8 +21,8 @@
import android.media.MediaController2;
import android.media.MediaItem2;
import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommand2;
import android.media.SessionCommandGroup2;
import android.os.Bundle;
import android.os.ResultReceiver;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
index 4ec6042..72ecf54 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
@@ -44,13 +44,13 @@
import android.media.MediaPlaylistAgent.PlaylistEventCallback;
import android.media.MediaSession2;
import android.media.MediaSession2.Builder;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.MediaSession2.OnDataSourceMissingHelper;
import android.media.MediaSession2.SessionCallback;
import android.media.MediaSessionService2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.VolumeProvider2;
import android.media.session.MediaSessionManager;
@@ -60,10 +60,11 @@
import android.os.Parcelable;
import android.os.Process;
import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
import android.text.TextUtils;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
import java.lang.ref.WeakReference;
import java.lang.reflect.Field;
import java.util.ArrayList;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
index ec657d7..11ccd9f 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
@@ -22,11 +22,11 @@
import android.media.MediaItem2;
import android.media.MediaLibraryService2.LibraryRoot;
import android.media.MediaMetadata2;
-import android.media.SessionCommand2;
import android.media.MediaSession2.CommandButton;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.VolumeProvider2;
import android.net.Uri;
import android.os.Binder;
@@ -35,13 +35,14 @@
import android.os.IBinder;
import android.os.RemoteException;
import android.os.ResultReceiver;
-import android.support.annotation.GuardedBy;
-import android.support.annotation.NonNull;
import android.text.TextUtils;
import android.util.ArrayMap;
import android.util.Log;
import android.util.SparseArray;
+import androidx.annotation.GuardedBy;
+import androidx.annotation.NonNull;
+
import com.android.media.MediaLibraryService2Impl.MediaLibrarySessionImpl;
import com.android.media.MediaSession2Impl.CommandButtonImpl;
import com.android.media.MediaSession2Impl.CommandGroupImpl;
diff --git a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
index c33eb65..d975839 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
@@ -20,7 +20,6 @@
import android.app.Notification;
import android.app.NotificationManager;
-import android.content.Context;
import android.content.Intent;
import android.media.MediaPlayerBase;
import android.media.MediaPlayerBase.PlayerEventCallback;
@@ -31,9 +30,10 @@
import android.media.SessionToken2.TokenType;
import android.media.update.MediaSessionService2Provider;
import android.os.IBinder;
-import android.support.annotation.GuardedBy;
import android.util.Log;
+import androidx.annotation.GuardedBy;
+
// TODO(jaewan): Need a test for session service itself.
public class MediaSessionService2Impl implements MediaSessionService2Provider {
diff --git a/packages/MediaComponents/src/com/android/media/Rating2Impl.java b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
index d558129..e2b9f0a 100644
--- a/packages/MediaComponents/src/com/android/media/Rating2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
@@ -18,7 +18,6 @@
import static android.media.Rating2.*;
-import android.content.Context;
import android.media.Rating2;
import android.media.Rating2.Style;
import android.media.update.Rating2Provider;
diff --git a/packages/MediaComponents/src/com/android/media/RoutePlayer.java b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
index 9450d34..ebff0e2 100644
--- a/packages/MediaComponents/src/com/android/media/RoutePlayer.java
+++ b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
@@ -23,7 +23,8 @@
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
-import android.support.annotation.RequiresApi;
+
+import androidx.annotation.RequiresApi;
import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaRouter;
@@ -33,8 +34,6 @@
import com.android.support.mediarouter.media.RemotePlaybackClient.SessionActionCallback;
import com.android.support.mediarouter.media.RemotePlaybackClient.StatusCallback;
-import java.util.Map;
-
@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
public class RoutePlayer extends MediaSession.Callback {
public static final long PLAYBACK_ACTIONS = PlaybackState.ACTION_PAUSE
diff --git a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
index a5cf8c4..f792712 100644
--- a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
@@ -16,9 +16,9 @@
package com.android.media;
+import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
import static android.media.SessionToken2.TYPE_SESSION;
import static android.media.SessionToken2.TYPE_SESSION_SERVICE;
-import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
import android.content.Context;
import android.content.Intent;
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
index a4d55d7..97d3927 100644
--- a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
+++ b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
@@ -16,12 +16,8 @@
package com.android.media.subtitle;
-import java.util.Locale;
-import java.util.Vector;
-
import android.content.Context;
import android.media.MediaFormat;
-import android.media.MediaPlayer2;
import android.media.MediaPlayer2.TrackInfo;
import android.os.Handler;
import android.os.Looper;
@@ -30,6 +26,9 @@
import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
+import java.util.Locale;
+import java.util.Vector;
+
// Note: This is forked from android.media.SubtitleController since P
/**
* The subtitle controller provides the architecture to display subtitles for a
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
index d7be549..f75b75e 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
@@ -31,13 +31,13 @@
import android.media.MediaMetadata2;
import android.media.MediaPlaylistAgent;
import android.media.MediaSession2;
-import android.media.SessionCommand2;
-import android.media.SessionCommandGroup2;
import android.media.MediaSession2.ControllerInfo;
import android.media.MediaSession2.SessionCallback;
import android.media.MediaSessionService2;
import android.media.MediaSessionService2.MediaNotification;
import android.media.Rating2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
import android.media.SessionToken2;
import android.media.VolumeProvider2;
import android.media.update.MediaBrowser2Provider;
@@ -59,11 +59,12 @@
import android.media.update.VolumeProvider2Provider;
import android.os.Bundle;
import android.os.IInterface;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.widget.MediaControlView2;
import android.widget.VideoView2;
+import androidx.annotation.Nullable;
+
import com.android.media.IMediaController2;
import com.android.media.MediaBrowser2Impl;
import com.android.media.MediaController2Impl;
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
index ad8bb48..dc5e5e2 100644
--- a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
+++ b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
@@ -18,21 +18,21 @@
import android.annotation.Nullable;
import android.content.Context;
-import android.content.ContextWrapper;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager.NameNotFoundException;
import android.content.res.Resources;
import android.content.res.Resources.Theme;
import android.content.res.XmlResourceParser;
-import android.support.annotation.GuardedBy;
-import android.support.v4.widget.Space;
-import android.support.v7.widget.ButtonBarLayout;
import android.util.AttributeSet;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
+import androidx.annotation.GuardedBy;
+import androidx.appcompat.widget.ButtonBarLayout;
+import androidx.legacy.widget.Space;
+
import com.android.support.mediarouter.app.MediaRouteButton;
import com.android.support.mediarouter.app.MediaRouteExpandCollapseButton;
import com.android.support.mediarouter.app.MediaRouteVolumeSlider;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
index d3e8d47..98c0d17 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
@@ -19,11 +19,12 @@
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.content.Context;
-import android.support.v4.view.ActionProvider;
import android.util.Log;
import android.view.View;
import android.view.ViewGroup;
+import androidx.core.view.ActionProvider;
+
import com.android.support.mediarouter.media.MediaRouteSelector;
import com.android.support.mediarouter.media.MediaRouter;
@@ -48,7 +49,7 @@
* <h3>Prerequisites</h3>
* <p>
* To use the media route action provider, the activity must be a subclass of
- * {@link AppCompatActivity} from the <code>android.support.v7.appcompat</code>
+ * {@link AppCompatActivity} from the <code>androidx.appcompat.appcompat</code>
* support library. Refer to support library documentation for details.
* </p>
*
@@ -65,7 +66,7 @@
* <item android:id="@+id/media_route_menu_item"
* android:title="@string/media_route_menu_title"
* app:showAsAction="always"
- * app:actionProviderClass="android.support.v7.app.MediaRouteActionProvider"/>
+ * app:actionProviderClass="androidx.mediarouter.app.MediaRouteActionProvider"/>
* </menu>
* </pre><p>
* Then configure the menu and set the route selector for the chooser.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
index fde8a63..e82fcb9 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
@@ -28,14 +28,15 @@
import android.graphics.drawable.AnimationDrawable;
import android.graphics.drawable.Drawable;
import android.os.AsyncTask;
-import android.support.v4.graphics.drawable.DrawableCompat;
-import android.support.v7.widget.TooltipCompat;
import android.util.AttributeSet;
import android.util.Log;
import android.util.SparseArray;
import android.view.SoundEffectConstants;
import android.view.View;
+import androidx.appcompat.widget.TooltipCompat;
+import androidx.core.graphics.drawable.DrawableCompat;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
import com.android.support.mediarouter.media.MediaRouteSelector;
@@ -70,7 +71,7 @@
* <h3>Prerequisites</h3>
* <p>
* To use the media route button, the activity must be a subclass of
- * {@link FragmentActivity} from the <code>android.support.v4</code>
+ * {@link FragmentActivity} from the <code>androidx.core./code>
* support library. Refer to support library documentation for details.
* </p>
*
@@ -81,9 +82,9 @@
private static final String TAG = "MediaRouteButton";
private static final String CHOOSER_FRAGMENT_TAG =
- "android.support.v7.mediarouter:MediaRouteChooserDialogFragment";
+ "androidx.mediarouter.media.outer:MediaRouteChooserDialogFragment";
private static final String CONTROLLER_FRAGMENT_TAG =
- "android.support.v7.mediarouter:MediaRouteControllerDialogFragment";
+ "androidx.mediarouter.media.outer:MediaRouteControllerDialogFragment";
private final MediaRouter mRouter;
private final MediaRouterCallback mCallback;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
index cac64d9..f24028a 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
@@ -16,13 +16,14 @@
package com.android.support.mediarouter.app;
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTED;
-import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTING;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+ .CONNECTION_STATE_CONNECTED;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo
+ .CONNECTION_STATE_CONNECTING;
import android.annotation.NonNull;
import android.app.Dialog;
import android.content.Context;
-import android.content.res.Resources;
import android.content.res.TypedArray;
import android.graphics.drawable.Drawable;
import android.net.Uri;
@@ -30,12 +31,10 @@
import android.os.Handler;
import android.os.Message;
import android.os.SystemClock;
-import android.support.v7.app.AppCompatDialog;
import android.text.TextUtils;
import android.util.Log;
import android.view.ContextThemeWrapper;
import android.view.Gravity;
-import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
index 060cfca..f6c1d2f 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
@@ -40,9 +40,6 @@
import android.support.v4.media.session.MediaControllerCompat;
import android.support.v4.media.session.MediaSessionCompat;
import android.support.v4.media.session.PlaybackStateCompat;
-import android.support.v4.util.ObjectsCompat;
-import android.support.v4.view.accessibility.AccessibilityEventCompat;
-import android.support.v7.graphics.Palette;
import android.text.TextUtils;
import android.util.Log;
import android.view.ContextThemeWrapper;
@@ -72,11 +69,15 @@
import android.widget.SeekBar;
import android.widget.TextView;
+import androidx.core.util.ObjectsCompat;
+import androidx.core.view.accessibility.AccessibilityEventCompat;
+import androidx.palette.graphics.Palette;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
+import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
import com.android.support.mediarouter.media.MediaRouteSelector;
import com.android.support.mediarouter.media.MediaRouter;
-import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
import java.io.BufferedInputStream;
import java.io.IOException;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
index a9eaf39..b5ee63e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
@@ -16,7 +16,7 @@
package com.android.support.mediarouter.app;
-import android.support.annotation.NonNull;
+import androidx.annotation.NonNull;
/**
* The media route dialog factory is responsible for creating the media route
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
index 02ee118..52aecd88 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
@@ -17,10 +17,11 @@
package com.android.support.mediarouter.app;
import android.os.Bundle;
-import android.support.v4.app.Fragment;
-import com.android.support.mediarouter.media.MediaRouter;
+import androidx.fragment.app.Fragment;
+
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
/**
* Media route discovery fragment.
@@ -34,7 +35,7 @@
* provide the {@link MediaRouter} callback to register.
* </p><p>
* Note that the discovery callback makes the application be connected with all the
- * {@link android.support.v7.media.MediaRouteProviderService media route provider services}
+ * {@link androidx.mediarouter.media.MediaRouteProviderService media route provider services}
* while it is registered.
* </p>
*/
@@ -114,7 +115,7 @@
}
/**
- * Called to create the {@link android.support.v7.media.MediaRouter.Callback callback}
+ * Called to create the {@link androidx.mediarouter.media.MediaRouter.Callback callback}
* that will be registered.
* <p>
* The default callback does nothing. The application may override this method to
@@ -129,7 +130,7 @@
/**
* Called to prepare the callback flags that will be used when the
- * {@link android.support.v7.media.MediaRouter.Callback callback} is registered.
+ * {@link androidx.mediarouter.media.MediaRouter.Callback callback} is registered.
* <p>
* The default implementation returns {@link MediaRouter#CALLBACK_FLAG_REQUEST_DISCOVERY}.
* </p>
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
index 6a0a95a..dcca6a0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
@@ -21,7 +21,6 @@
import android.graphics.PorterDuff;
import android.graphics.PorterDuffColorFilter;
import android.graphics.drawable.AnimationDrawable;
-import android.support.v4.content.ContextCompat;
import android.util.AttributeSet;
import android.view.View;
import android.widget.ImageButton;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
index 63f042f..b4bf8d1 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
@@ -19,12 +19,13 @@
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
-import android.support.annotation.IntDef;
-import android.support.v4.graphics.ColorUtils;
import android.util.TypedValue;
import android.view.ContextThemeWrapper;
import android.view.View;
+import androidx.annotation.IntDef;
+import androidx.core.graphics.ColorUtils;
+
import com.android.media.update.R;
import java.lang.annotation.Retention;
@@ -170,7 +171,7 @@
private static boolean isLightTheme(Context context) {
TypedValue value = new TypedValue();
// TODO(sungsoo): Switch to com.android.internal.R.attr.isLightTheme
- return context.getTheme().resolveAttribute(android.support.v7.appcompat.R.attr.isLightTheme,
+ return context.getTheme().resolveAttribute(androidx.appcompat.R.attr.isLightTheme,
value, true) && value.data != 0;
}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
index f8539bd..5a0bc95 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
@@ -20,7 +20,6 @@
import android.hardware.display.DisplayManager;
import android.os.Build;
import android.os.Handler;
-import android.support.annotation.RequiresApi;
import android.util.Log;
import android.view.Display;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
index 90ea2d5..92f608b 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
@@ -19,7 +19,8 @@
import android.app.PendingIntent;
import android.os.Bundle;
import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
/**
* Describes the playback status of a media item.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
index 91a2e1a..7ea328c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
@@ -23,7 +23,8 @@
import android.content.Intent;
import android.os.Handler;
import android.os.Message;
-import android.support.v4.util.ObjectsCompat;
+
+import androidx.core.util.ObjectsCompat;
import com.android.support.mediarouter.media.MediaRouter.ControlRequestCallback;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
index 43cde10..a186fee 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
@@ -29,12 +29,14 @@
.CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_DISCOVERY_REQUEST;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNREGISTER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UNSELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
@@ -51,9 +53,12 @@
.SERVICE_MSG_GENERIC_FAILURE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_CURRENT;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .isValidRemoteMessenger;
import android.app.Service;
import android.content.Intent;
@@ -65,11 +70,12 @@
import android.os.Message;
import android.os.Messenger;
import android.os.RemoteException;
-import android.support.annotation.VisibleForTesting;
-import android.support.v4.util.ObjectsCompat;
import android.util.Log;
import android.util.SparseArray;
+import androidx.annotation.VisibleForTesting;
+import androidx.core.util.ObjectsCompat;
+
import java.lang.ref.WeakReference;
import java.util.ArrayList;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
index 5669b19..f20dcc0 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
@@ -17,8 +17,9 @@
import android.content.IntentFilter;
import android.os.Bundle;
-import android.support.annotation.NonNull;
-import android.support.annotation.Nullable;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
index db0052e..4b56b19 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
@@ -33,15 +33,16 @@
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
-import android.support.v4.app.ActivityManagerCompat;
-import android.support.v4.hardware.display.DisplayManagerCompat;
-import android.support.v4.media.VolumeProviderCompat;
import android.support.v4.media.session.MediaSessionCompat;
-import android.support.v4.util.Pair;
import android.text.TextUtils;
import android.util.Log;
import android.view.Display;
+import androidx.core.app.ActivityManagerCompat;
+import androidx.core.hardware.display.DisplayManagerCompat;
+import androidx.core.util.Pair;
+import androidx.media.VolumeProviderCompat;
+
import com.android.support.mediarouter.media.MediaRouteProvider.ProviderMetadata;
import com.android.support.mediarouter.media.MediaRouteProvider.RouteController;
@@ -81,13 +82,13 @@
static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the reason the route
* was unselected is unknown.
*/
public static final int UNSELECT_REASON_UNKNOWN = 0;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
* the disconnect button to disconnect and keep playing.
* <p>
@@ -96,13 +97,13 @@
*/
public static final int UNSELECT_REASON_DISCONNECTED = 1;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
* the stop casting button.
*/
public static final int UNSELECT_REASON_STOPPED = 2;
/**
- * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * Passed to {@link androidx.mediarouter.media.MediaRouteProvider.RouteController#onUnselect(int)}
* and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user selected
* a different route.
*/
@@ -174,7 +175,7 @@
* Applications should typically add a callback using this flag in the
* {@link android.app.Activity activity's} {@link android.app.Activity#onStart onStart}
* method and remove it in the {@link android.app.Activity#onStop onStop} method.
- * The {@link android.support.v7.app.MediaRouteDiscoveryFragment} fragment may
+ * The {@link androidx.mediarouter.app.MediaRouteDiscoveryFragment} fragment may
* also be used for this purpose.
* </p><p class="note">
* On {@link ActivityManager#isLowRamDevice low-RAM devices} this flag
@@ -182,7 +183,7 @@
* {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
* </p>
*
- * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
*/
public static final int CALLBACK_FLAG_REQUEST_DISCOVERY = 1 << 2;
@@ -197,7 +198,7 @@
* {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
* </p>
*
- * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ * @see androidx.mediarouter.app.MediaRouteDiscoveryFragment
*/
public static final int CALLBACK_FLAG_FORCE_DISCOVERY = 1 << 3;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
index 3206596..0e7514c 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
@@ -19,7 +19,8 @@
import android.app.PendingIntent;
import android.os.Bundle;
import android.os.SystemClock;
-import android.support.v4.util.TimeUtils;
+
+import androidx.core.util.TimeUtils;
/**
* Describes the playback status of a media session.
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
index 98e4e28..eacf1c8 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
@@ -29,17 +29,20 @@
.CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_ROUTE_CONTROL_REQUEST;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_DISCOVERY_REQUEST;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_SET_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNREGISTER;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UNSELECT_ROUTE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.CLIENT_MSG_UPDATE_ROUTE_VOLUME;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_VERSION_CURRENT;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_DATA_ERROR;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_CONTROL_REQUEST_FAILED;
@@ -51,9 +54,11 @@
.SERVICE_MSG_GENERIC_FAILURE;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
.SERVICE_MSG_GENERIC_SUCCESS;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_REGISTERED;
import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_1;
-import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .isValidRemoteMessenger;
import android.annotation.NonNull;
import android.content.ComponentName;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
index 826449b..65c5518 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
@@ -18,7 +18,6 @@
import android.content.Context;
import android.media.AudioManager;
import android.os.Build;
-import android.support.annotation.RequiresApi;
import java.lang.ref.WeakReference;
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
index f6e1497..e76564e 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
@@ -22,9 +22,10 @@
import android.content.IntentFilter;
import android.net.Uri;
import android.os.Bundle;
-import android.support.v4.util.ObjectsCompat;
import android.util.Log;
+import androidx.core.util.ObjectsCompat;
+
/**
* A helper class for playing media on remote routes using the remote playback protocol
* defined by {@link MediaControlIntent}.
@@ -867,11 +868,11 @@
private final class ActionReceiver extends BroadcastReceiver {
public static final String ACTION_ITEM_STATUS_CHANGED =
- "android.support.v7.media.actions.ACTION_ITEM_STATUS_CHANGED";
+ "androidx.mediarouter.media.actions.ACTION_ITEM_STATUS_CHANGED";
public static final String ACTION_SESSION_STATUS_CHANGED =
- "android.support.v7.media.actions.ACTION_SESSION_STATUS_CHANGED";
+ "androidx.mediarouter.media.actions.ACTION_SESSION_STATUS_CHANGED";
public static final String ACTION_MESSAGE_RECEIVED =
- "android.support.v7.media.actions.ACTION_MESSAGE_RECEIVED";
+ "androidx.mediarouter.media.actions.ACTION_MESSAGE_RECEIVED";
ActionReceiver() {
}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
index a38491f..53901a4 100644
--- a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
@@ -24,7 +24,6 @@
import android.content.res.Resources;
import android.media.AudioManager;
import android.os.Build;
-import android.support.annotation.RequiresApi;
import android.view.Display;
import com.android.media.update.ApiHelper;
diff --git a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
index 3aff150..ad85af4 100644
--- a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
@@ -20,15 +20,13 @@
import android.content.res.Resources;
import android.graphics.Point;
import android.media.MediaMetadata;
+import android.media.SessionToken2;
import android.media.session.MediaController;
import android.media.session.PlaybackState;
-import android.media.SessionToken2;
import android.media.update.MediaControlView2Provider;
import android.media.update.ViewGroupProvider;
import android.os.Bundle;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
-import android.util.Log;
import android.view.Gravity;
import android.view.MotionEvent;
import android.view.View;
@@ -36,27 +34,28 @@
import android.view.WindowManager;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
-import android.widget.Button;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.ListView;
import android.widget.MediaControlView2;
-import android.widget.ProgressBar;
import android.widget.PopupWindow;
+import android.widget.ProgressBar;
import android.widget.RelativeLayout;
import android.widget.SeekBar;
import android.widget.SeekBar.OnSeekBarChangeListener;
import android.widget.TextView;
+import androidx.annotation.Nullable;
+
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
import com.android.support.mediarouter.app.MediaRouteButton;
-import com.android.support.mediarouter.media.MediaRouter;
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
-import java.util.Arrays;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Formatter;
import java.util.List;
import java.util.Locale;
diff --git a/packages/MediaComponents/src/com/android/widget/SubtitleView.java b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
index 67b2cd1..db0ae33 100644
--- a/packages/MediaComponents/src/com/android/widget/SubtitleView.java
+++ b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
@@ -19,10 +19,11 @@
import android.content.Context;
import android.graphics.Canvas;
import android.os.Looper;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.widget.FrameLayout;
+import androidx.annotation.Nullable;
+
import com.android.media.subtitle.SubtitleController.Anchor;
import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
diff --git a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
index fc92e85..c9869c0 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
@@ -16,17 +16,18 @@
package com.android.widget;
+import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+
import android.content.Context;
import android.graphics.Rect;
import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
import android.util.AttributeSet;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
-import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+import androidx.annotation.NonNull;
class VideoSurfaceView extends SurfaceView implements VideoViewInterface, SurfaceHolder.Callback {
private static final String TAG = "VideoSurfaceView";
diff --git a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
index 024a3aa..40fb046 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
@@ -16,18 +16,19 @@
package com.android.widget;
+import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+
import android.content.Context;
import android.graphics.SurfaceTexture;
import android.media.MediaPlayer2;
-import android.support.annotation.NonNull;
-import android.support.annotation.RequiresApi;
import android.util.AttributeSet;
import android.util.Log;
import android.view.Surface;
import android.view.TextureView;
import android.view.View;
-import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+import androidx.annotation.NonNull;
+import androidx.annotation.RequiresApi;
@RequiresApi(26)
class VideoTextureView extends TextureView
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
index 97279d6..ffb145a 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -28,30 +28,29 @@
import android.media.AudioFocusRequest;
import android.media.AudioManager;
import android.media.DataSourceDesc;
+import android.media.MediaItem2;
import android.media.MediaMetadata;
+import android.media.MediaMetadata2;
+import android.media.MediaMetadataRetriever;
import android.media.MediaPlayer2;
import android.media.MediaPlayer2.MediaPlayer2EventCallback;
import android.media.MediaPlayer2.OnSubtitleDataListener;
import android.media.MediaPlayer2Impl;
-import android.media.SubtitleData;
-import android.media.MediaItem2;
-import android.media.MediaMetadata2;
-import android.media.MediaMetadataRetriever;
import android.media.Metadata;
import android.media.PlaybackParams;
+import android.media.SessionToken2;
+import android.media.SubtitleData;
import android.media.TimedText;
import android.media.session.MediaController;
import android.media.session.MediaController.PlaybackInfo;
import android.media.session.MediaSession;
import android.media.session.PlaybackState;
-import android.media.SessionToken2;
import android.media.update.VideoView2Provider;
import android.media.update.ViewGroupProvider;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.ResultReceiver;
-import android.support.annotation.Nullable;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.util.Log;
@@ -66,6 +65,8 @@
import android.widget.TextView;
import android.widget.VideoView2;
+import androidx.annotation.Nullable;
+
import com.android.internal.graphics.palette.Palette;
import com.android.media.RoutePlayer;
import com.android.media.subtitle.ClosedCaptionRenderer;
@@ -73,10 +74,10 @@
import com.android.media.subtitle.SubtitleTrack;
import com.android.media.update.ApiHelper;
import com.android.media.update.R;
-import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaControlIntent;
-import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaItemStatus;
import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
import java.util.ArrayList;
import java.util.List;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 7419e64..c0aa477 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -2,24 +2,6 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := \
- ServiceUtilities.cpp
-
-# FIXME Move this library to frameworks/native
-LOCAL_MODULE := libserviceutility
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- liblog \
- libbinder
-
-LOCAL_CFLAGS := -Wall -Werror
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-
LOCAL_SRC_FILES:= \
AudioFlinger.cpp \
Threads.cpp \
@@ -31,7 +13,8 @@
PatchPanel.cpp \
StateQueue.cpp \
BufLog.cpp \
- TypedLogger.cpp
+ TypedLogger.cpp \
+ NBAIO_Tee.cpp \
LOCAL_C_INCLUDES := \
frameworks/av/services/audiopolicy \
@@ -53,13 +36,13 @@
libnbaio \
libnblog \
libpowermanager \
- libserviceutility \
libmediautils \
libmemunreachable \
libmedia_helper
LOCAL_STATIC_LIBRARIES := \
libcpustats \
+ libsndfile \
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -82,6 +65,7 @@
LOCAL_CFLAGS += -fvisibility=hidden
LOCAL_CFLAGS += -Werror -Wall
+LOCAL_SANITIZE := integer_overflow
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index bdd39c6..79501cd 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -28,7 +28,6 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
-#include <cutils/multiuser.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
@@ -47,7 +46,7 @@
#include <system/audio.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
+#include "NBAIO_Tee.h"
#include <media/AudioResamplerPublic.h>
@@ -56,7 +55,6 @@
#include <system/audio_effects/effect_aec.h>
#include <audio_utils/primitives.h>
-#include <audio_utils/string.h>
#include <powermanager/PowerManager.h>
@@ -66,6 +64,7 @@
#include <media/nbaio/PipeReader.h>
#include <media/AudioParameter.h>
#include <mediautils/BatteryNotifier.h>
+#include <mediautils/ServiceUtilities.h>
#include <private/android_filesystem_config.h>
//#define BUFLOG_NDEBUG 0
@@ -100,17 +99,6 @@
uint32_t AudioFlinger::mScreenState;
-
-#ifdef TEE_SINK
-bool AudioFlinger::mTeeSinkInputEnabled = false;
-bool AudioFlinger::mTeeSinkOutputEnabled = false;
-bool AudioFlinger::mTeeSinkTrackEnabled = false;
-
-size_t AudioFlinger::mTeeSinkInputFrames = kTeeSinkInputFramesDefault;
-size_t AudioFlinger::mTeeSinkOutputFrames = kTeeSinkOutputFramesDefault;
-size_t AudioFlinger::mTeeSinkTrackFrames = kTeeSinkTrackFramesDefault;
-#endif
-
// In order to avoid invalidating offloaded tracks each time a Visualizer is turned on and off
// we define a minimum time during which a global effect is considered enabled.
static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200);
@@ -160,6 +148,7 @@
mTotalMemory(0),
mClientSharedHeapSize(kMinimumClientSharedHeapSizeBytes),
mGlobalEffectEnableTime(0),
+ mPatchPanel(this),
mSystemReady(false)
{
// unsigned instead of audio_unique_id_use_t, because ++ operator is unavailable for enum
@@ -168,7 +157,6 @@
mNextUniqueIds[use] = AUDIO_UNIQUE_ID_USE_MAX;
}
- getpid_cached = getpid();
const bool doLog = property_get_bool("ro.test_harness", false);
if (doLog) {
mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
@@ -185,27 +173,6 @@
mEffectsFactoryHal = EffectsFactoryHalInterface::create();
mMediaLogNotifier->run("MediaLogNotifier");
-
-#ifdef TEE_SINK
- char value[PROPERTY_VALUE_MAX];
- (void) property_get("ro.debuggable", value, "0");
- int debuggable = atoi(value);
- int teeEnabled = 0;
- if (debuggable) {
- (void) property_get("af.tee", value, "0");
- teeEnabled = atoi(value);
- }
- // FIXME symbolic constants here
- if (teeEnabled & 1) {
- mTeeSinkInputEnabled = true;
- }
- if (teeEnabled & 2) {
- mTeeSinkOutputEnabled = true;
- }
- if (teeEnabled & 4) {
- mTeeSinkTrackEnabled = true;
- }
-#endif
}
void AudioFlinger::onFirstRef()
@@ -226,8 +193,6 @@
}
}
- mPatchPanel = new PatchPanel(this);
-
mMode = AUDIO_MODE_NORMAL;
gAudioFlinger = this;
@@ -341,7 +306,7 @@
*sessionId = actualSessionId;
} else {
if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
- AudioSystem::releaseOutput(io, streamType, actualSessionId);
+ AudioSystem::releaseOutput(portId);
} else {
AudioSystem::releaseInput(portId);
}
@@ -534,12 +499,7 @@
dev->dump(fd);
}
-#ifdef TEE_SINK
- // dump the serially shared record tee sink
- if (mRecordTeeSource != 0) {
- dumpTee(fd, mRecordTeeSource, AUDIO_IO_HANDLE_NONE, 'C');
- }
-#endif
+ mPatchPanel.dump(fd);
BUFLOG_RESET;
@@ -547,6 +507,10 @@
mLock.unlock();
}
+#ifdef TEE_SINK
+ // NBAIO_Tee dump is safe to call outside of AF lock.
+ NBAIO_Tee::dumpAll(fd, "_DUMP");
+#endif
// append a copy of media.log here by forwarding fd to it, but don't attempt
// to lookup the service if it's not running, as it will block for a second
if (sMediaLogServiceAsBinder != 0) {
@@ -666,7 +630,7 @@
bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid = input.clientInfo.clientUid;
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
@@ -813,7 +777,7 @@
Exit:
if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+ AudioSystem::releaseOutput(portId);
}
*status = lStatus;
return trackHandle;
@@ -1078,9 +1042,9 @@
ALOGW("checkStreamType() invalid stream %d", stream);
return BAD_VALUE;
}
- pid_t caller = IPCThreadState::self()->getCallingPid();
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && caller != getpid_cached) {
- ALOGW("checkStreamType() pid %d cannot use internal stream type %d", caller, stream);
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && !isAudioServerUid(callerUid)) {
+ ALOGW("checkStreamType() uid %d cannot use internal stream type %d", callerUid, stream);
return PERMISSION_DENIED;
}
@@ -1180,6 +1144,23 @@
}
}
+// forwardAudioHwSyncToDownstreamPatches_l() must be called with AudioFlinger::mLock held
+void AudioFlinger::forwardParametersToDownstreamPatches_l(
+ audio_io_handle_t upStream, const String8& keyValuePairs,
+ std::function<bool(const sp<PlaybackThread>&)> useThread)
+{
+ std::vector<PatchPanel::SoftwarePatch> swPatches;
+ if (mPatchPanel.getDownstreamSoftwarePatches(upStream, &swPatches) != OK) return;
+ ALOGV_IF(!swPatches.empty(), "%s found %zu downstream patches for stream ID %d",
+ __func__, swPatches.size(), upStream);
+ for (const auto& swPatch : swPatches) {
+ sp<PlaybackThread> downStream = checkPlaybackThread_l(swPatch.getPlaybackThreadHandle());
+ if (downStream != NULL && (useThread == nullptr || useThread(downStream))) {
+ downStream->setParameters(keyValuePairs);
+ }
+ }
+}
+
// Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
// Some keys are used for audio routing and audio path configuration and should be reserved for use
// by audio policy and audio flinger for functional, privacy and security reasons.
@@ -1200,9 +1181,8 @@
String8(AudioParameter::keyStreamSupportedSamplingRates),
};
- // multiuser friendly app ID check for requests coming from audioserver
- if (multiuser_get_app_id(callingUid) == AID_AUDIOSERVER) {
- return;
+ if (isAudioServerUid(callingUid)) {
+ return; // no need to filter if audioserver.
}
AudioParameter param = AudioParameter(keyValuePairs);
@@ -1296,7 +1276,9 @@
}
}
if (thread != 0) {
- return thread->setParameters(filteredKeyValuePairs);
+ status_t result = thread->setParameters(filteredKeyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), filteredKeyValuePairs);
+ return result;
}
return BAD_VALUE;
}
@@ -1636,7 +1618,7 @@
bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid = input.clientInfo.clientUid;
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
@@ -1854,6 +1836,10 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
+ if (strcmp(name, AUDIO_HAL_SERVICE_NAME_MSD) == 0) {
+ // An MSD module is inserted before hardware modules in order to mix encoded streams.
+ flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
+ }
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
@@ -1885,7 +1871,7 @@
status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory)
{
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (uid != AID_SYSTEM) {
+ if (!isAudioServerOrSystemServerUid(uid)) {
return PERMISSION_DENIED;
}
Mutex::Autolock _l(mLock);
@@ -1930,6 +1916,28 @@
return mClientSharedHeapSize;
}
+status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
+{
+ ALOGV(__func__);
+
+ audio_module_handle_t module;
+ if (config->type == AUDIO_PORT_TYPE_DEVICE) {
+ module = config->ext.device.hw_module;
+ } else {
+ module = config->ext.mix.hw_module;
+ }
+
+ Mutex::Autolock _l(mLock);
+ ssize_t index = mAudioHwDevs.indexOfKey(module);
+ if (index < 0) {
+ ALOGW("%s() bad hw module %d", __func__, module);
+ return BAD_VALUE;
+ }
+
+ AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(index);
+ return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+}
+
audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
@@ -1975,7 +1983,10 @@
if (sessions & ThreadBase::TRACK_SESSION) {
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
- thread->setParameters(param.toString());
+ String8 keyValuePairs = param.toString();
+ thread->setParameters(keyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+ [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
break;
}
}
@@ -2021,7 +2032,10 @@
ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
- thread->setParameters(param.toString());
+ String8 keyValuePairs = param.toString();
+ thread->setParameters(keyValuePairs);
+ forwardParametersToDownstreamPatches_l(thread->id(), keyValuePairs,
+ [](const sp<PlaybackThread>& thread) { return thread->usesHwAvSync(); });
}
}
@@ -2111,6 +2125,7 @@
*output, thread.get());
}
mPlaybackThreads.add(*output, thread);
+ mPatchPanel.notifyStreamOpened(outHwDev, *output);
return thread;
}
}
@@ -2246,6 +2261,7 @@
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = output;
ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+ mPatchPanel.notifyStreamClosed(output);
}
// The thread entity (active unit of execution) is no longer running here,
// but the ThreadBase container still exists.
@@ -2274,7 +2290,7 @@
delete out;
}
-void AudioFlinger::closeOutputInternal_l(const sp<PlaybackThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<PlaybackThread>& thread)
{
mPlaybackThreads.removeItem(thread->mId);
thread->exit();
@@ -2350,6 +2366,9 @@
return 0;
}
+ // Some flags are specific to framework and must not leak to the HAL.
+ flags = static_cast<audio_input_flags_t>(flags & ~AUDIO_INPUT_FRAMEWORK_FLAGS);
+
// Audio Policy can request a specific handle for hardware hotword.
// The goal here is not to re-open an already opened input.
// It is to use a pre-assigned I/O handle.
@@ -2407,55 +2426,6 @@
thread.get());
return thread;
} else {
-#ifdef TEE_SINK
- // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
- // or (re-)create if current Pipe is idle and does not match the new format
- sp<NBAIO_Sink> teeSink;
- enum {
- TEE_SINK_NO, // don't copy input
- TEE_SINK_NEW, // copy input using a new pipe
- TEE_SINK_OLD, // copy input using an existing pipe
- } kind;
- NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
- audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
- if (!mTeeSinkInputEnabled) {
- kind = TEE_SINK_NO;
- } else if (!Format_isValid(format)) {
- kind = TEE_SINK_NO;
- } else if (mRecordTeeSink == 0) {
- kind = TEE_SINK_NEW;
- } else if (mRecordTeeSink->getStrongCount() != 1) {
- kind = TEE_SINK_NO;
- } else if (Format_isEqual(format, mRecordTeeSink->format())) {
- kind = TEE_SINK_OLD;
- } else {
- kind = TEE_SINK_NEW;
- }
- switch (kind) {
- case TEE_SINK_NEW: {
- Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {format};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mRecordTeeSink = pipe;
- mRecordTeeSource = pipeReader;
- teeSink = pipe;
- }
- break;
- case TEE_SINK_OLD:
- teeSink = mRecordTeeSink;
- break;
- case TEE_SINK_NO:
- default:
- break;
- }
-#endif
-
// Start record thread
// RecordThread requires both input and output device indication to forward to audio
// pre processing modules
@@ -2465,9 +2435,6 @@
primaryOutputDevice_l(),
devices,
mSystemReady
-#ifdef TEE_SINK
- , teeSink
-#endif
);
mRecordThreads.add(*input, thread);
ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
@@ -2567,7 +2534,7 @@
delete in;
}
-void AudioFlinger::closeInputInternal_l(const sp<RecordThread>& thread)
+void AudioFlinger::closeThreadInternal_l(const sp<RecordThread>& thread)
{
mRecordThreads.removeItem(thread->mId);
closeInputFinish(thread);
@@ -2605,7 +2572,8 @@
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid);
- if (pid != -1 && (caller == getpid_cached)) {
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (pid != -1 && isAudioServerUid(callerUid)) { // check must match releaseAudioSessionId()
caller = pid;
}
@@ -2639,7 +2607,8 @@
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
ALOGV("releasing %d from %d for %d", audioSession, caller, pid);
- if (pid != -1 && (caller == getpid_cached)) {
+ const uid_t callerUid = IPCThreadState::self()->getCallingUid();
+ if (pid != -1 && isAudioServerUid(callerUid)) { // check must match acquireAudioSessionId()
caller = pid;
}
size_t num = mAudioSessionRefs.size();
@@ -2656,9 +2625,10 @@
return;
}
}
- // If the caller is mediaserver it is likely that the session being released was acquired
+ // If the caller is audioserver it is likely that the session being released was acquired
// on behalf of a process not in notification clients and we ignore the warning.
- ALOGW_IF(caller != getpid_cached, "session id %d not found for pid %d", audioSession, caller);
+ ALOGW_IF(!isAudioServerUid(callerUid),
+ "session id %d not found for pid %d", audioSession, caller);
}
bool AudioFlinger::isSessionAcquired_l(audio_session_t audioSession)
@@ -2966,7 +2936,7 @@
effect_descriptor_t desc;
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+ if (pid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
ALOGW_IF(pid != -1 && pid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
@@ -2989,8 +2959,8 @@
}
// Session AUDIO_SESSION_OUTPUT_STAGE is reserved for output stage effects
- // that can only be created by audio policy manager (running in same process)
- if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid_cached != pid) {
+ // that can only be created by audio policy manager
+ if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && !isAudioServerUid(callingUid)) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -3370,136 +3340,6 @@
}
-struct Entry {
-#define TEE_MAX_FILENAME 32 // %Y%m%d%H%M%S_%d.wav = 4+2+2+2+2+2+1+1+4+1 = 21
- char mFileName[TEE_MAX_FILENAME];
-};
-
-int comparEntry(const void *p1, const void *p2)
-{
- return strcmp(((const Entry *) p1)->mFileName, ((const Entry *) p2)->mFileName);
-}
-
-#ifdef TEE_SINK
-void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix)
-{
- NBAIO_Source *teeSource = source.get();
- if (teeSource != NULL) {
- // .wav rotation
- // There is a benign race condition if 2 threads call this simultaneously.
- // They would both traverse the directory, but the result would simply be
- // failures at unlink() which are ignored. It's also unlikely since
- // normally dumpsys is only done by bugreport or from the command line.
- char teePath[PATH_MAX] = "/data/misc/audioserver";
- size_t teePathLen = strlen(teePath);
- DIR *dir = opendir(teePath);
- teePath[teePathLen++] = '/';
- if (dir != NULL) {
-#define TEE_MAX_SORT 20 // number of entries to sort
-#define TEE_MAX_KEEP 10 // number of entries to keep
- struct Entry entries[TEE_MAX_SORT];
- size_t entryCount = 0;
- while (entryCount < TEE_MAX_SORT) {
- errno = 0; // clear errno before readdir() to track potential errors.
- const struct dirent *result = readdir(dir);
- if (result == nullptr) {
- ALOGW_IF(errno != 0, "tee readdir() failure %s", strerror(errno));
- break;
- }
- // ignore non .wav file entries
- const size_t nameLen = strlen(result->d_name);
- if (nameLen <= 4 || nameLen >= TEE_MAX_FILENAME ||
- strcmp(&result->d_name[nameLen - 4], ".wav")) {
- continue;
- }
- (void)audio_utils_strlcpy(entries[entryCount++].mFileName, result->d_name);
- }
- (void) closedir(dir);
- if (entryCount > TEE_MAX_KEEP) {
- qsort(entries, entryCount, sizeof(Entry), comparEntry);
- for (size_t i = 0; i < entryCount - TEE_MAX_KEEP; ++i) {
- strcpy(&teePath[teePathLen], entries[i].mFileName);
- (void) unlink(teePath);
- }
- }
- } else {
- if (fd >= 0) {
- dprintf(fd, "unable to rotate tees in %.*s: %s\n", (int) teePathLen, teePath,
- strerror(errno));
- }
- }
- char teeTime[16];
- struct timeval tv;
- gettimeofday(&tv, NULL);
- struct tm tm;
- localtime_r(&tv.tv_sec, &tm);
- strftime(teeTime, sizeof(teeTime), "%Y%m%d%H%M%S", &tm);
- snprintf(&teePath[teePathLen], sizeof(teePath) - teePathLen, "%s_%d_%c.wav", teeTime, id,
- suffix);
- // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd
- int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
- if (teeFd >= 0) {
- // FIXME use libsndfile
- char wavHeader[44];
- memcpy(wavHeader,
- "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
- sizeof(wavHeader));
- NBAIO_Format format = teeSource->format();
- unsigned channelCount = Format_channelCount(format);
- uint32_t sampleRate = Format_sampleRate(format);
- size_t frameSize = Format_frameSize(format);
- wavHeader[22] = channelCount; // number of channels
- wavHeader[24] = sampleRate; // sample rate
- wavHeader[25] = sampleRate >> 8;
- wavHeader[32] = frameSize; // block alignment
- wavHeader[33] = frameSize >> 8;
- write(teeFd, wavHeader, sizeof(wavHeader));
- size_t total = 0;
- bool firstRead = true;
-#define TEE_SINK_READ 1024 // frames per I/O operation
- void *buffer = malloc(TEE_SINK_READ * frameSize);
- for (;;) {
- size_t count = TEE_SINK_READ;
- ssize_t actual = teeSource->read(buffer, count);
- bool wasFirstRead = firstRead;
- firstRead = false;
- if (actual <= 0) {
- if (actual == (ssize_t) OVERRUN && wasFirstRead) {
- continue;
- }
- break;
- }
- ALOG_ASSERT(actual <= (ssize_t)count);
- write(teeFd, buffer, actual * frameSize);
- total += actual;
- }
- free(buffer);
- lseek(teeFd, (off_t) 4, SEEK_SET);
- uint32_t temp = 44 + total * frameSize - 8;
- // FIXME not big-endian safe
- write(teeFd, &temp, sizeof(temp));
- lseek(teeFd, (off_t) 40, SEEK_SET);
- temp = total * frameSize;
- // FIXME not big-endian safe
- write(teeFd, &temp, sizeof(temp));
- close(teeFd);
- // TODO Should create file with temporary name and then rename to final if non-empty.
- if (total > 0) {
- if (fd >= 0) {
- dprintf(fd, "tee copied to %s\n", teePath);
- }
- } else {
- unlink(teePath);
- }
- } else {
- if (fd >= 0) {
- dprintf(fd, "unable to create tee %s: %s\n", teePath, strerror(errno));
- }
- }
- }
-}
-#endif
-
// ----------------------------------------------------------------------------
status_t AudioFlinger::onTransact(
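The new AudioFlinger::forwardParametersToDownstreamPatches_l() in the hunks above lets setParameters() and the HW A/V sync paths push key/value pairs to playback threads that sit behind software patches, optionally filtered by a predicate (for example, only threads that use HW A/V sync). A minimal standalone sketch of that optional-predicate pattern follows; Thread, ThreadPtr and forwardToDownstream are simplified stand-ins for illustration, not the real AudioFlinger types.

// Standalone sketch (not part of the patch) of the parameter-forwarding
// pattern used by forwardParametersToDownstreamPatches_l(): broadcast to all
// downstream threads, or only to those accepted by an optional predicate.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Thread {
    int id;
    bool usesHwAvSync;
    void setParameters(const std::string& kvPairs) {
        std::cout << "thread " << id << " <- " << kvPairs << '\n';
    }
};

using ThreadPtr = std::shared_ptr<Thread>;

// Mirrors the shape of the helper: the predicate defaults to "accept all".
void forwardToDownstream(const std::vector<ThreadPtr>& downstream,
                         const std::string& kvPairs,
                         std::function<bool(const ThreadPtr&)> useThread = nullptr) {
    for (const auto& t : downstream) {
        if (t != nullptr && (useThread == nullptr || useThread(t))) {
            t->setParameters(kvPairs);
        }
    }
}

int main() {
    std::vector<ThreadPtr> downstream = {
        std::make_shared<Thread>(Thread{1, false}),
        std::make_shared<Thread>(Thread{2, true}),
    };
    // Unfiltered broadcast, as done from setParameters().
    forwardToDownstream(downstream, "routing=2");
    // Filtered broadcast, as done for the HW A/V sync key.
    forwardToDownstream(downstream, "hw_av_sync=42",
                        [](const ThreadPtr& t) { return t->usesHwAvSync; });
    return 0;
}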
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 963a87d..4a780ce 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -62,7 +62,9 @@
#include <media/LinearMap.h>
#include <media/VolumeShaper.h>
+#include <audio_utils/clock.h>
#include <audio_utils/SimpleLog.h>
+#include <audio_utils/TimestampVerifier.h>
#include "FastCapture.h"
#include "FastMixer.h"
@@ -71,6 +73,7 @@
#include "AudioStreamOut.h"
#include "SpdifStreamOut.h"
#include "AudioHwDevice.h"
+#include "NBAIO_Tee.h"
#include <powermanager/IPowerManager.h>
@@ -675,6 +678,9 @@
bool updateOrphanEffectChains(const sp<EffectModule>& effect);
void broacastParametersToRecordThreads_l(const String8& keyValuePairs);
+ void forwardParametersToDownstreamPatches_l(
+ audio_io_handle_t upStream, const String8& keyValuePairs,
+ std::function<bool(const sp<PlaybackThread>&)> useThread = nullptr);
// AudioStreamIn is immutable, so their fields are const.
// For emphasis, we could also make all pointers to them be "const *",
@@ -791,44 +797,16 @@
// for use from destructor
status_t closeOutput_nonvirtual(audio_io_handle_t output);
- void closeOutputInternal_l(const sp<PlaybackThread>& thread);
+ void closeThreadInternal_l(const sp<PlaybackThread>& thread);
status_t closeInput_nonvirtual(audio_io_handle_t input);
- void closeInputInternal_l(const sp<RecordThread>& thread);
+ void closeThreadInternal_l(const sp<RecordThread>& thread);
void setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId);
status_t checkStreamType(audio_stream_type_t stream) const;
void filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
-#ifdef TEE_SINK
- // all record threads serially share a common tee sink, which is re-created on format change
- sp<NBAIO_Sink> mRecordTeeSink;
- sp<NBAIO_Source> mRecordTeeSource;
-#endif
-
public:
-
-#ifdef TEE_SINK
- // tee sink, if enabled by property, allows dumpsys to write most recent audio to .wav file
- static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id, char suffix);
-
- // whether tee sink is enabled by property
- static bool mTeeSinkInputEnabled;
- static bool mTeeSinkOutputEnabled;
- static bool mTeeSinkTrackEnabled;
-
- // runtime configured size of each tee sink pipe, in frames
- static size_t mTeeSinkInputFrames;
- static size_t mTeeSinkOutputFrames;
- static size_t mTeeSinkTrackFrames;
-
- // compile-time default size of tee sink pipes, in frames
- // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
- static const size_t kTeeSinkInputFramesDefault = 0x200000;
- static const size_t kTeeSinkOutputFramesDefault = 0x200000;
- static const size_t kTeeSinkTrackFramesDefault = 0x200000;
-#endif
-
// These methods read variables atomically without mLock,
// though the variables are updated with mLock.
bool isLowRamDevice() const { return mIsLowRamDevice; }
@@ -843,7 +821,8 @@
nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled
- sp<PatchPanel> mPatchPanel;
+ // protected by mLock
+ PatchPanel mPatchPanel;
sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
bool mSystemReady;
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index eb826c6..d4299b0 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -35,6 +35,9 @@
enum Flags {
AHWD_CAN_SET_MASTER_VOLUME = 0x1,
AHWD_CAN_SET_MASTER_MUTE = 0x2,
+ // Means that this isn't a terminal module, and software patches
+ // are used to transport audio data further.
+ AHWD_IS_INSERT = 0x4,
};
AudioHwDevice(audio_module_handle_t handle,
@@ -55,6 +58,10 @@
return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
}
+ bool isInsert() const {
+ return (0 != (mFlags & AHWD_IS_INSERT));
+ }
+
audio_module_handle_t handle() const { return mHandle; }
const char *moduleName() const { return mModuleName; }
sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
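The AHWD_IS_INSERT flag added above marks a HAL module that is not a terminal sink: its output is carried onward over software patches (the MSD module special-cased in AudioFlinger::loadHwModule is the case this patch targets). A minimal sketch of the bit-flag test, with AudioHwDevice reduced to a stand-in:

// Standalone sketch (not the real class) of the AHWD_* flag handling.
#include <cstdio>

class AudioHwDevice {
public:
    enum Flags {
        AHWD_CAN_SET_MASTER_VOLUME = 0x1,
        AHWD_CAN_SET_MASTER_MUTE   = 0x2,
        AHWD_IS_INSERT             = 0x4, // non-terminal module; audio continues over software patches
    };

    explicit AudioHwDevice(Flags flags) : mFlags(flags) {}

    bool isInsert() const { return (mFlags & AHWD_IS_INSERT) != 0; }

private:
    const Flags mFlags;
};

int main() {
    // An MSD-style module would be registered with the insert flag set.
    AudioHwDevice msd(static_cast<AudioHwDevice::Flags>(
            AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME | AudioHwDevice::AHWD_IS_INSERT));
    std::printf("isInsert: %s\n", msd.isInsert() ? "yes" : "no");
    return 0;
}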
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 2047dfd..786c4af 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -31,9 +31,9 @@
#include <media/AudioEffect.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <mediautils/ServiceUtilities.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
// ----------------------------------------------------------------------------
@@ -1815,7 +1815,7 @@
bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
snprintf(buffer, size, "\t\t\t%5d %5d %3s %3s %5u %5u\n",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mPriority,
mHasControl ? "yes" : "no",
locked ? "yes" : "no",
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 79bb9fe..ad35264 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -47,7 +47,8 @@
/*static*/ const FastMixerState FastMixer::sInitial;
-FastMixer::FastMixer() : FastThread("cycle_ms", "load_us"),
+FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
+ : FastThread("cycle_ms", "load_us"),
// mFastTrackNames
// mGenerations
mOutputSink(NULL),
@@ -66,8 +67,11 @@
mTotalNativeFramesWritten(0),
// timestamp
mNativeFramesWrittenButNotPresented(0), // the = 0 is to silence the compiler
- mMasterMono(false)
+ mMasterMono(false),
+ mThreadIoHandle(parentIoHandle)
{
+ (void)mThreadIoHandle; // prevent unused warning, see C++17 [[maybe_unused]]
+
// FIXME pass sInitial as parameter to base class constructor, and make it static local
mPrevious = &sInitial;
mCurrent = &sInitial;
@@ -220,6 +224,10 @@
previousTrackMask = 0;
mFastTracksGen = current->mFastTracksGen - 1;
dumpState->mFrameCount = frameCount;
+#ifdef TEE_SINK
+ mTee.set(mFormat, NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle) + "_F");
+#endif
} else {
previousTrackMask = previous->mTrackMask;
}
@@ -328,13 +336,15 @@
{
// TODO: pass an ID parameter to indicate which time series we want to write to in NBLog.cpp
// Or: pass both of these into a single call with a boolean
+ const FastMixerState * const current = (const FastMixerState *) mCurrent;
+ FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
+
if (mIsWarm) {
LOG_HIST_TS();
} else {
+ dumpState->mTimestampVerifier.discontinuity();
LOG_AUDIO_STATE();
}
- const FastMixerState * const current = (const FastMixerState *) mCurrent;
- FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
const FastMixerState::Command command = mCommand;
const size_t frameCount = current->mFrameCount;
@@ -446,10 +456,9 @@
frameCount * Format_channelCount(mFormat));
}
// if non-NULL, then duplicate write() to this non-blocking sink
- NBAIO_Sink* teeSink;
- if ((teeSink = current->mTeeSink) != NULL) {
- (void) teeSink->write(buffer, frameCount);
- }
+#ifdef TEE_SINK
+ mTee.write(buffer, frameCount);
+#endif
// FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
// but this code should be modified to handle both non-blocking and blocking sinks
dumpState->mWriteSequence++;
@@ -470,35 +479,47 @@
mAttemptedWrite = true;
// FIXME count # of writes blocked excessively, CPU usage, etc. for dump
- ExtendedTimestamp timestamp; // local
- status_t status = mOutputSink->getTimestamp(timestamp);
- if (status == NO_ERROR) {
- const int64_t totalNativeFramesPresented =
- timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
- if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
- mNativeFramesWrittenButNotPresented =
- mTotalNativeFramesWritten - totalNativeFramesPresented;
- mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ if (mIsWarm) {
+ ExtendedTimestamp timestamp; // local
+ status_t status = mOutputSink->getTimestamp(timestamp);
+ if (status == NO_ERROR) {
+ dumpState->mTimestampVerifier.add(
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ mSampleRate);
+ const int64_t totalNativeFramesPresented =
timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
- timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
+ mNativeFramesWrittenButNotPresented =
+ mTotalNativeFramesWritten - totalNativeFramesPresented;
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ // We don't compensate for server - kernel time difference and
+ // only update latency if we have valid info.
+ dumpState->mLatencyMs =
+ (double)mNativeFramesWrittenButNotPresented * 1000 / mSampleRate;
+ } else {
+ // HAL reported that more frames were presented than were written
+ mNativeFramesWrittenButNotPresented = 0;
+ status = INVALID_OPERATION;
+ }
} else {
- // HAL reported that more frames were presented than were written
- mNativeFramesWrittenButNotPresented = 0;
- status = INVALID_OPERATION;
+ dumpState->mTimestampVerifier.error();
}
- }
- if (status == NO_ERROR) {
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
- } else {
- // fetch server time if we can't get timestamp
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
- systemTime(SYSTEM_TIME_MONOTONIC);
- // clear out kernel cached position as this may get rapidly stale
- // if we never get a new valid timestamp
- mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+ if (status == NO_ERROR) {
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ } else {
+ // fetch server time if we can't get timestamp
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ systemTime(SYSTEM_TIME_MONOTONIC);
+ // clear out kernel cached position as this may get rapidly stale
+ // if we never get a new valid timestamp
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+ }
}
}
}
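For reference, the latency figure computed in the hunk above is simple frame arithmetic; with illustrative values (not taken from this change):

    mLatencyMs = mNativeFramesWrittenButNotPresented * 1000 / mSampleRate
               = 960 * 1000 / 48000
               = 20 ms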
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 235d23f..1c86d9a 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -22,6 +22,7 @@
#include "StateQueue.h"
#include "FastMixerState.h"
#include "FastMixerDumpState.h"
+#include "NBAIO_Tee.h"
namespace android {
@@ -32,7 +33,9 @@
class FastMixer : public FastThread {
public:
- FastMixer();
+ /** The FastMixer constructor takes the parent MixerThread's I/O handle (id)
+ as a parameter, for identification purposes. */
+ explicit FastMixer(audio_io_handle_t threadIoHandle);
virtual ~FastMixer();
FastMixerStateQueue* sq();
@@ -87,6 +90,11 @@
// accessed without lock between multiple threads.
std::atomic_bool mMasterMono;
std::atomic_int_fast64_t mBoottimeOffset;
+
+ const audio_io_handle_t mThreadIoHandle; // parent thread id for debugging purposes
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
}; // class FastMixer
} // namespace android
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 2e4fb8c..d60643c 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -68,11 +68,12 @@
dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
" numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
" sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
- " mixPeriod=%.2f ms\n",
+ " mixPeriod=%.2f ms latency=%.2f ms\n",
FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
mNumTracks, mWriteErrors, mUnderruns, mOverruns,
mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
- mixPeriodSec * 1e3);
+ mixPeriodSec * 1e3, mLatencyMs);
+ dprintf(fd, " FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
#ifdef FAST_THREAD_STATISTICS
// find the interval of valid samples
uint32_t bounds = mBounds;
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 8ef31d1..9b91cbc 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -18,6 +18,7 @@
#define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
#include <stdint.h>
+#include <audio_utils/TimestampVerifier.h>
#include "Configuration.h"
#include "FastThreadDumpState.h"
#include "FastMixerState.h"
@@ -66,6 +67,7 @@
void dump(int fd) const; // should only be called on a stable copy, not the original
+ double mLatencyMs = 0.; // measured latency, default of 0 if no valid timestamp read.
uint32_t mWriteSequence; // incremented before and after each write()
uint32_t mFramesWritten; // total number of frames written successfully
uint32_t mNumTracks; // total number of active fast tracks
@@ -74,6 +76,9 @@
size_t mFrameCount;
uint32_t mTrackMask; // mask of active tracks
FastTrackDump mTracks[FastMixerState::kMaxFastTracks];
+
+ // For timestamp statistics.
+ TimestampVerifier<int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
};
} // android
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index 36d8eef..b98842d 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -35,7 +35,7 @@
FastMixerState::FastMixerState() : FastThreadState(),
// mFastTracks
mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0),
- mFrameCount(0), mTeeSink(NULL)
+ mFrameCount(0)
{
int ok = pthread_once(&sMaxFastTracksOnce, sMaxFastTracksInit);
if (ok != 0) {
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 2be1e91..c7fcbd8 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -77,9 +77,6 @@
WRITE = 0x10, // write to output sink
MIX_WRITE = 0x18; // mix tracks and write to output sink
- // This might be a one-time configuration rather than per-state
- NBAIO_Sink* mTeeSink; // if non-NULL, then duplicate write()s to this non-blocking sink
-
// never returns NULL; asserts if command is invalid
static const char *commandToString(Command command);
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index 6f546c3..968d5aa 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -28,6 +28,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
audio_session_t sessionId,
+ bool isOut,
uid_t uid,
pid_t pid,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
@@ -39,8 +40,9 @@
audio_session_t triggerSession);
virtual void stop();
virtual bool isFastTrack() const { return false; }
+ bool isDirect() const override { return true; }
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
// protected by MMapThread::mLock
diff --git a/services/audioflinger/NBAIO_Tee.cpp b/services/audioflinger/NBAIO_Tee.cpp
new file mode 100644
index 0000000..53083d5
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.cpp
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "NBAIO_Tee"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <deque>
+#include <dirent.h>
+#include <future>
+#include <list>
+#include <vector>
+
+#include <audio_utils/format.h>
+#include <audio_utils/sndfile.h>
+#include <media/nbaio/PipeReader.h>
+
+#include "Configuration.h"
+#include "NBAIO_Tee.h"
+
+// Enabled with TEE_SINK in Configuration.h
+#ifdef TEE_SINK
+
+namespace android {
+
+/*
+ Tee filenames are generated as follows:
+
+ "aftee_Date_ThreadId_C_reason.wav" RecordThread
+ "aftee_Date_ThreadId_M_reason.wav" MixerThread (Normal)
+ "aftee_Date_ThreadId_F_reason.wav" MixerThread (Fast)
+ "aftee_Date_ThreadId_TrackId_R_reason.wav" RecordTrack
+ "aftee_Date_ThreadId_TrackId_TrackName_T_reason.wav" PlaybackTrack
+
+ where Date = YYYYmmdd_HHMMSS_MSEC
+
+ where Reason = [ DTOR | DUMP | REMOVE ]
+
+ Examples:
+ aftee_20180424_153811_038_13_57_2_T_REMOVE.wav
+ aftee_20180424_153811_218_13_57_2_T_REMOVE.wav
+ aftee_20180424_153811_378_13_57_2_T_REMOVE.wav
+ aftee_20180424_153825_147_62_C_DUMP.wav
+ aftee_20180424_153825_148_62_59_R_DUMP.wav
+ aftee_20180424_153825_149_13_F_DUMP.wav
+ aftee_20180424_153842_125_62_59_R_REMOVE.wav
+ aftee_20180424_153842_168_62_C_DTOR.wav
+*/
+
+static constexpr char DEFAULT_PREFIX[] = "aftee_";
+static constexpr char DEFAULT_DIRECTORY[] = "/data/misc/audioserver";
+static constexpr size_t DEFAULT_THREADPOOL_SIZE = 8;
+
+/** AudioFileHandler manages temporary audio wav files with a least recently created
+ retention policy.
+
+ The temporary filenames are systematically generated. A common filename prefix,
+ storage directory, and concurrency pool are passed in on creating the object.
+
+ Temporary files are created by "create", which returns a filename generated by
+
+ prefix + 19 char date + suffix + ".wav"
+
+ TODO Move to audio_utils.
+ TODO Avoid pointing two AudioFileHandlers to the same directory and prefix
+ as we don't have a prefix specific lock file. */
+
+class AudioFileHandler {
+public:
+
+ AudioFileHandler(const std::string &prefix, const std::string &directory, size_t pool)
+ : mThreadPool(pool)
+ , mPrefix(prefix)
+ {
+ (void)setDirectory(directory);
+ }
+
+ /** returns filename of created audio file, else empty string on failure. */
+ std::string create(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &suffix);
+
+private:
+ /** Sets the current directory. This is currently private to avoid confusion:
+ changing it while operations are pending is safe, but only weakly
+ synchronized. */
+ status_t setDirectory(const std::string &directory);
+
+ /** Cleans the current directory and, if requested, returns its name through *dir. */
+ status_t clean(std::string *dir = nullptr);
+
+ /** creates an audio file from a reader functor passed in. */
+ status_t createInternal(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &filename);
+
+ static bool isDirectoryValid(const std::string &directory) {
+ return directory.size() > 0 && directory[0] == '/';
+ }
+
+ std::string generateFilename(const std::string &suffix) const {
+ char fileTime[sizeof("YYYYmmdd_HHMMSS_\0")];
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ LOG_ALWAYS_FATAL_IF(strftime(fileTime, sizeof(fileTime), "%Y%m%d_%H%M%S_", &tm) == 0,
+ "incorrect fileTime buffer");
+ char msec[4];
+ (void)snprintf(msec, sizeof(msec), "%03d", (int)(tv.tv_usec / 1000));
+ return mPrefix + fileTime + msec + suffix + ".wav";
+ }
+
+ bool isManagedFilename(const char *name) {
+ constexpr size_t FILENAME_LEN_DATE = 4 + 2 + 2 // %Y%m%d
+ + 1 + 2 + 2 + 2 // _%H%M%S
+ + 1 + 3; // _MSEC
+ const size_t prefixLen = mPrefix.size();
+ const size_t nameLen = strlen(name);
+
+ // reject on size, prefix, and .wav
+ if (nameLen < prefixLen + FILENAME_LEN_DATE + 4 /* .wav */
+ || strncmp(name, mPrefix.c_str(), prefixLen) != 0
+ || strcmp(name + nameLen - 4, ".wav") != 0) {
+ return false;
+ }
+
+ // validate date portion
+ const char *date = name + prefixLen;
+ return std::all_of(date, date + 8, isdigit)
+ && date[8] == '_'
+ && std::all_of(date + 9, date + 15, isdigit)
+ && date[15] == '_'
+ && std::all_of(date + 16, date + 19, isdigit);
+ }
+
+ // yet another ThreadPool implementation.
+ class ThreadPool {
+ public:
+ ThreadPool(size_t size)
+ : mThreadPoolSize(size)
+ { }
+
+ /** launches task "name" with associated function "func".
+ If the thread pool is exhausted, the task runs synchronously on the calling thread. */
+ status_t launch(const std::string &name, std::function<status_t()> func);
+
+ private:
+ std::mutex mLock;
+ std::list<std::pair<
+ std::string, std::future<status_t>>> mFutures; // GUARDED_BY(mLock)
+
+ const size_t mThreadPoolSize;
+ } mThreadPool;
+
+ const std::string mPrefix;
+ std::mutex mLock;
+ std::string mDirectory; // GUARDED_BY(mLock)
+ std::deque<std::string> mFiles; // GUARDED_BY(mLock) sorted list of files by creation time
+
+ static constexpr size_t FRAMES_PER_READ = 1024;
+ static constexpr size_t MAX_FILES_READ = 1024;
+ static constexpr size_t MAX_FILES_KEEP = 32;
+};
+
+/* static */
+void NBAIO_Tee::NBAIO_TeeImpl::dumpTee(
+ int fd, const NBAIO_SinkSource &sinkSource, const std::string &suffix)
+{
+ // Singleton. Constructed thread-safe on first call, never destroyed.
+ static AudioFileHandler audioFileHandler(
+ DEFAULT_PREFIX, DEFAULT_DIRECTORY, DEFAULT_THREADPOOL_SIZE);
+
+ auto &source = sinkSource.second;
+ if (source.get() == nullptr) {
+ return;
+ }
+
+ const NBAIO_Format format = source->format();
+ bool firstRead = true;
+ std::string filename = audioFileHandler.create(
+ // this functor must not hold references to stack variables
+ [firstRead, sinkSource] (void *buffer, size_t frames) mutable {
+ auto &source = sinkSource.second;
+ ssize_t actualRead = source->read(buffer, frames);
+ if (actualRead == (ssize_t)OVERRUN && firstRead) {
+ // recheck once
+ actualRead = source->read(buffer, frames);
+ }
+ firstRead = false;
+ return actualRead;
+ },
+ Format_sampleRate(format),
+ Format_channelCount(format),
+ format.mFormat,
+ suffix);
+
+ if (fd >= 0 && filename.size() > 0) {
+ dprintf(fd, "tee wrote to %s\n", filename.c_str());
+ }
+}
+
+/* static */
+NBAIO_Tee::NBAIO_TeeImpl::NBAIO_SinkSource NBAIO_Tee::NBAIO_TeeImpl::makeSinkSource(
+ const NBAIO_Format &format, size_t frames, bool *enabled)
+{
+ if (Format_isValid(format) && audio_is_linear_pcm(format.mFormat)) {
+ Pipe *pipe = new Pipe(frames, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ if (index != 0) {
+ ALOGW("pipe failure to negotiate: %zd", index);
+ goto exit;
+ }
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ if (index != 0) {
+ ALOGW("pipeReader failure to negotiate: %zd", index);
+ goto exit;
+ }
+ if (enabled != nullptr) *enabled = true;
+ return {pipe, pipeReader};
+ }
+exit:
+ if (enabled != nullptr) *enabled = false;
+ return {nullptr, nullptr};
+}
+
+std::string AudioFileHandler::create(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &suffix)
+{
+ const std::string filename = generateFilename(suffix);
+
+ if (mThreadPool.launch(std::string("create ") + filename,
+ [=]() { return createInternal(reader, sampleRate, channelCount, format, filename); })
+ == NO_ERROR) {
+ return filename;
+ }
+ return "";
+}
+
+status_t AudioFileHandler::setDirectory(const std::string &directory)
+{
+ if (!isDirectoryValid(directory)) return BAD_VALUE;
+
+ // TODO: consider using std::filesystem in C++17
+ DIR *dir = opendir(directory.c_str());
+
+ if (dir == nullptr) {
+ ALOGW("%s: cannot open directory %s", __func__, directory.c_str());
+ return BAD_VALUE;
+ }
+
+ size_t toRemove = 0;
+ decltype(mFiles) files;
+
+ while (files.size() < MAX_FILES_READ) {
+ errno = 0;
+ const struct dirent *result = readdir(dir);
+ if (result == nullptr) {
+ ALOGW_IF(errno != 0, "%s: readdir failure %s", __func__, strerror(errno));
+ break;
+ }
+ // is it a managed filename?
+ if (!isManagedFilename(result->d_name)) {
+ continue;
+ }
+ files.emplace_back(result->d_name);
+ }
+ (void)closedir(dir);
+
+ // OPTIMIZATION: we don't need to stat each file; the filenames are
+ // already (roughly) ordered by creation date. We use std::deque instead
+ // of std::set for faster insertion and sorting times.
+
+ if (files.size() > MAX_FILES_KEEP) {
+ // removed files can use a partition (no need to do a full sort).
+ toRemove = files.size() - MAX_FILES_KEEP;
+ std::nth_element(files.begin(), files.begin() + toRemove - 1, files.end());
+ }
+
+ // kept files must be sorted.
+ std::sort(files.begin() + toRemove, files.end());
+
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+
+ mDirectory = directory;
+ mFiles = std::move(files);
+ }
+
+ if (toRemove > 0) { // launch a clean in the background.
+ (void)mThreadPool.launch(
+ std::string("cleaning ") + directory, [this]() { return clean(); });
+ }
+ return NO_ERROR;
+}
+
+status_t AudioFileHandler::clean(std::string *directory)
+{
+ std::vector<std::string> filesToRemove;
+ std::string dir;
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+
+ if (!isDirectoryValid(mDirectory)) return NO_INIT;
+
+ dir = mDirectory;
+ if (mFiles.size() > MAX_FILES_KEEP) {
+ size_t toRemove = mFiles.size() - MAX_FILES_KEEP;
+
+ // use move and erase to efficiently transfer std::string
+ std::move(mFiles.begin(),
+ mFiles.begin() + toRemove,
+ std::back_inserter(filesToRemove));
+ mFiles.erase(mFiles.begin(), mFiles.begin() + toRemove);
+ }
+ }
+
+ std::string dirp = dir + "/";
+ // remove files outside of lock for better concurrency.
+ for (const auto &file : filesToRemove) {
+ (void)unlink((dirp + file).c_str());
+ }
+
+ // return the directory if requested.
+ if (directory != nullptr) {
+ *directory = dir;
+ }
+ return NO_ERROR;
+}
+
+status_t AudioFileHandler::ThreadPool::launch(
+ const std::string &name, std::function<status_t()> func)
+{
+ if (mThreadPoolSize > 1) {
+ std::lock_guard<std::mutex> _l(mLock);
+ if (mFutures.size() >= mThreadPoolSize) {
+ for (auto it = mFutures.begin(); it != mFutures.end();) {
+ const std::string &filename = it->first;
+ std::future<status_t> &future = it->second;
+ if (!future.valid() ||
+ future.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
+ ALOGV("%s: future %s ready", __func__, filename.c_str());
+ it = mFutures.erase(it);
+ } else {
+ ALOGV("%s: future %s not ready", __func__, filename.c_str());
+ ++it;
+ }
+ }
+ }
+ if (mFutures.size() < mThreadPoolSize) {
+ ALOGV("%s: deferred calling %s", __func__, name.c_str());
+ mFutures.emplace_back(name, std::async(std::launch::async, func));
+ return NO_ERROR;
+ }
+ }
+ ALOGV("%s: immediate calling %s", __func__, name.c_str());
+ return func();
+}
+
+status_t AudioFileHandler::createInternal(
+ std::function<ssize_t /* frames_read */
+ (void * /* buffer */, size_t /* size_in_frames */)> reader,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ audio_format_t format,
+ const std::string &filename)
+{
+ // Attempt to choose the best matching file format.
+ // We can choose any sf_format
+ // but writeFormat must be one of 16, 32, float
+ // due to sf_writef compatibility.
+ int sf_format;
+ audio_format_t writeFormat;
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ sf_format = SF_FORMAT_PCM_16;
+ writeFormat = AUDIO_FORMAT_PCM_16_BIT;
+ ALOGV("%s: %s using PCM_16 for format %#x", __func__, filename.c_str(), format);
+ break;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ sf_format = SF_FORMAT_PCM_32;
+ writeFormat = AUDIO_FORMAT_PCM_32_BIT;
+ ALOGV("%s: %s using PCM_32 for format %#x", __func__, filename.c_str(), format);
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ sf_format = SF_FORMAT_FLOAT;
+ writeFormat = AUDIO_FORMAT_PCM_FLOAT;
+ ALOGV("%s: %s using PCM_FLOAT for format %#x", __func__, filename.c_str(), format);
+ break;
+ default:
+ // TODO:
+ // handle audio_has_proportional_frames() formats.
+ // handle compressed formats as single byte files.
+ return BAD_VALUE;
+ }
+
+ std::string directory;
+ status_t status = clean(&directory);
+ if (status != NO_ERROR) return status;
+ std::string dirPrefix = directory + "/";
+
+ const std::string path = dirPrefix + filename;
+
+ /* const */ SF_INFO info = {
+ .frames = 0,
+ .samplerate = (int)sampleRate,
+ .channels = (int)channelCount,
+ .format = SF_FORMAT_WAV | sf_format,
+ };
+ SNDFILE *sf = sf_open(path.c_str(), SFM_WRITE, &info);
+ if (sf == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ size_t total = 0;
+ void *buffer = malloc(FRAMES_PER_READ * std::max(
+ channelCount * audio_bytes_per_sample(writeFormat), // output frame size
+ channelCount * audio_bytes_per_sample(format))); // input frame size
+ if (buffer == nullptr) {
+ sf_close(sf);
+ return NO_MEMORY;
+ }
+
+ for (;;) {
+ const ssize_t actualRead = reader(buffer, FRAMES_PER_READ);
+ if (actualRead <= 0) {
+ break;
+ }
+
+ // Convert input format to writeFormat as needed.
+ if (format != writeFormat) {
+ memcpy_by_audio_format(
+ buffer, writeFormat, buffer, format, actualRead * info.channels);
+ }
+
+ ssize_t reallyWritten;
+ switch (writeFormat) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ reallyWritten = sf_writef_short(sf, (const int16_t *)buffer, actualRead);
+ break;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ reallyWritten = sf_writef_int(sf, (const int32_t *)buffer, actualRead);
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ reallyWritten = sf_writef_float(sf, (const float *)buffer, actualRead);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("%s: %s writeFormat: %#x", __func__, filename.c_str(), writeFormat);
+ break;
+ }
+
+ if (reallyWritten < 0) {
+ ALOGW("%s: %s write error: %zd", __func__, filename.c_str(), reallyWritten);
+ break;
+ }
+ total += reallyWritten;
+ if (reallyWritten < actualRead) {
+ ALOGW("%s: %s write short count: %zd < %zd",
+ __func__, filename.c_str(), reallyWritten, actualRead);
+ break;
+ }
+ }
+ sf_close(sf);
+ free(buffer);
+ if (total == 0) {
+ (void)unlink(path.c_str());
+ return NOT_ENOUGH_DATA;
+ }
+
+ // Success: add our name to managed files.
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ // weak synchronization - only update mFiles if the directory hasn't changed.
+ if (mDirectory == directory) {
+ mFiles.emplace_back(filename); // add to the end to preserve sort.
+ }
+ }
+ return NO_ERROR; // success; the filename was already returned to the caller by create()
+}
+
+} // namespace android
+
+#endif // TEE_SINK
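For reference, a minimal sketch of the reader-functor contract used by AudioFileHandler::create() above: the functor is called repeatedly with a destination buffer and a frame budget, returns the number of frames it produced, and a return value of zero or less ends the file. The handler instance, the stream parameters, and the silence generator below are illustrative assumptions (AudioFileHandler is internal to NBAIO_Tee.cpp), not part of this change.

    // Illustrative only: emit one second of stereo 16-bit silence through the same
    // reader contract that dumpTee() uses with a PipeReader. Assumes "handler" is an
    // AudioFileHandler constructed as in dumpTee() above.
    size_t remaining = 48000;                          // frames still to produce
    const std::string filename = handler.create(
            [remaining](void *buffer, size_t frames) mutable -> ssize_t {
                const size_t n = std::min(remaining, frames);
                memset(buffer, 0, n * 2 /* channels */ * sizeof(int16_t));  // silence
                remaining -= n;
                return (ssize_t)n;                     // 0 on the final call ends the file
            },
            48000 /* sampleRate */, 2 /* channelCount */,
            AUDIO_FORMAT_PCM_16_BIT, "_EXAMPLE");
    // filename is empty on failure; otherwise the file lands in /data/misc/audioserver
    // and is subject to the MAX_FILES_KEEP retention policy described above.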
diff --git a/services/audioflinger/NBAIO_Tee.h b/services/audioflinger/NBAIO_Tee.h
new file mode 100644
index 0000000..fed8cc8
--- /dev/null
+++ b/services/audioflinger/NBAIO_Tee.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Enabled with TEE_SINK in Configuration.h
+#ifndef ANDROID_NBAIO_TEE_H
+#define ANDROID_NBAIO_TEE_H
+
+#ifdef TEE_SINK
+
+#include <atomic>
+#include <mutex>
+#include <set>
+
+#include <cutils/properties.h>
+#include <media/nbaio/NBAIO.h>
+
+namespace android {
+
+/**
+ * The NBAIO_Tee uses the NBAIO Pipe and PipeReader for nonblocking
+ * data collection, for eventual dump to log files.
+ * See https://source.android.com/devices/audio/debugging for how to
+ * enable it via the ro.debuggable and af.tee properties.
+ *
+ * The write() into the NBAIO_Tee is therefore nonblocking,
+ * but changing NBAIO_Tee formats with set() cannot be done during a write();
+ * usually the caller already implements this mutual exclusion.
+ *
+ * All other calls except set() vs write() may occur at any time.
+ *
+ * dump() minimizes disruption to the caller since system calls are executed
+ * on an asynchronous thread (when possible).
+ *
+ * Currently the NBAIO_Tee is "hardwired" for AudioFlinger support.
+ *
+ * Some AudioFlinger specific notes:
+ *
+ * 1) Tees capture only linear PCM data.
+ * 2) Tees without any data written are considered empty and do not generate
+ * any output files.
+ * 3) Once a Tee dumps data, it is considered "emptied" and new data
+ * needs to be written before another Tee file is generated.
+ * 4) Tee file format is
+ * WAV integer PCM 16 bit for AUDIO_FORMAT_PCM_8_BIT, AUDIO_FORMAT_PCM_16_BIT.
+ * WAV integer PCM 32 bit for AUDIO_FORMAT_PCM_8_24_BIT, AUDIO_FORMAT_PCM_24_BIT_PACKED
+ * AUDIO_FORMAT_PCM_32_BIT.
+ * WAV float PCM 32 bit for AUDIO_FORMAT_PCM_FLOAT.
+ *
+ * Input_Thread:
+ * 1) Capture buffer is teed when read from the HAL, before resampling for the AudioRecord
+ * client.
+ *
+ * Output_Thread:
+ * 1) MixerThreads will tee at the FastMixer output (if it has one) or at the
+ * NormalMixer output (if no FastMixer).
+ * 2) DuplicatingThreads do not tee any mixed data. Apply a tee on the downstream OutputTrack
+ * or on the upstream playback Tracks.
+ * 3) DirectThreads and OffloadThreads do not tee any data. The upstream track
+ * (if linear PCM format) may be teed to discover data.
+ * 4) MmapThreads are not supported.
+ *
+ * Tracks:
+ * 1) RecordTracks and playback Tracks tee as data is being written to or
+ * read from the shared client-server track buffer by the associated Threads.
+ * 2) The mechanism is on the AudioBufferProvider release() so large static Track
+ * playback may not show any Tee data depending on when it is released.
+ * 3) When a track becomes inactive, the Thread will trigger a dump.
+ */
+
+class NBAIO_Tee {
+public:
+ /* TEE_FLAG is used in set() and must match the flags for the af.tee property
+ given in https://source.android.com/devices/audio/debugging
+ */
+ enum TEE_FLAG {
+ TEE_FLAG_NONE = 0,
+ TEE_FLAG_INPUT_THREAD = (1 << 0), // treat as a Tee for input (Capture) Threads
+ TEE_FLAG_OUTPUT_THREAD = (1 << 1), // treat as a Tee for output (Playback) Threads
+ TEE_FLAG_TRACK = (1 << 2), // treat as a Tee for tracks (Record and Playback)
+ };
+
+ NBAIO_Tee()
+ : mTee(std::make_shared<NBAIO_TeeImpl>())
+ {
+ getRunningTees().add(mTee);
+ }
+
+ ~NBAIO_Tee() {
+ getRunningTees().remove(mTee);
+ dump(-1, "_DTOR"); // log any data remaining in Tee.
+ }
+
+ /**
+ * \brief set is used for deferred configuration of Tee.
+ *
+ * May be called anytime except concurrently with write().
+ *
+ * \param format NBAIO_Format used to open NBAIO pipes
+ * \param flags (https://source.android.com/devices/audio/debugging)
+ * - TEE_FLAG_NONE to bypass af.tee property checks (default);
+ * - TEE_FLAG_INPUT_THREAD to check af.tee if input thread logging set;
+ * - TEE_FLAG_OUTPUT_THREAD to check af.tee if output thread logging set;
+ * - TEE_FLAG_TRACK to check af.tee if track logging set.
+ * \param frames number of frames to open the NBAIO pipe (set to 0 to use default).
+ *
+ * \return
+ * - NO_ERROR on success (or format unchanged)
+ * - BAD_VALUE if format or flags invalid.
+ * - PERMISSION_DENIED if flags not allowed by af.tee
+ */
+
+ status_t set(const NBAIO_Format &format,
+ TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+ return mTee->set(format, flags, frames);
+ }
+
+ status_t set(uint32_t sampleRate, uint32_t channelCount, audio_format_t format,
+ TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
+ return mTee->set(Format_from_SR_C(sampleRate, channelCount, format), flags, frames);
+ }
+
+ /**
+ * \brief write data to the tee.
+ *
+ * This call is lock free (the shared pointer and NBAIO are lock free);
+ * it may be called simultaneously with all methods except set().
+ *
+ * \param buffer to write to pipe.
+ * \param frameCount in frames as specified by the format passed to set()
+ */
+
+ void write(const void *buffer, size_t frameCount) const {
+ mTee->write(buffer, frameCount);
+ }
+
+ /** sets Tee id string which identifies the generated file (should be unique). */
+ void setId(const std::string &id) const {
+ mTee->setId(id);
+ }
+
+ /**
+ * \brief dump the audio content written to the Tee.
+ *
+ * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+ * \param reason string suffix to append to the generated file.
+ */
+ void dump(int fd, const std::string &reason = "") const {
+ mTee->dump(fd, reason);
+ }
+
+ /**
+ * \brief dump all Tees currently alive.
+ *
+ * \param fd file descriptor to write dumped filename for logging, use -1 to ignore.
+ * \param reason string suffix to append to the generated file.
+ */
+ static void dumpAll(int fd, const std::string &reason = "") {
+ getRunningTees().dump(fd, reason);
+ }
+
+private:
+
+ /** The underlying implementation of the Tee - its lifetime is managed through
+ a shared pointer, so destruction of the NBAIO_Tee container may proceed
+ even while dumping is occurring. */
+ class NBAIO_TeeImpl {
+ public:
+ status_t set(const NBAIO_Format &format, TEE_FLAG flags, size_t frames) {
+ static const int teeConfig = property_get_bool("ro.debuggable", false)
+ ? property_get_int32("af.tee", 0) : 0;
+
+ // check the type of Tee
+ const TEE_FLAG type = TEE_FLAG(
+ flags & (TEE_FLAG_INPUT_THREAD | TEE_FLAG_OUTPUT_THREAD | TEE_FLAG_TRACK));
+
+ // parameter flags can't select multiple types.
+ if (__builtin_popcount(type) > 1) {
+ return BAD_VALUE;
+ }
+
+ // if type is set, we check to see if it is permitted by configuration.
+ if (type != 0 && (type & teeConfig) == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ // determine number of frames for Tee
+ if (frames == 0) {
+ // TODO: consider varying frame count based on type.
+ frames = DEFAULT_TEE_FRAMES;
+ }
+
+ // TODO: should we check minimum number of frames?
+
+ // don't do anything if format and frames are the same.
+ if (Format_isEqual(format, mFormat) && frames == mFrames) {
+ return NO_ERROR;
+ }
+
+ bool enabled = false;
+ auto sinksource = makeSinkSource(format, frames, &enabled);
+
+ // enabled is set if makeSinkSource is successful.
+ // Note: as mentioned in NBAIO_Tee::set(), don't call set() while write() is
+ // ongoing.
+ if (enabled) {
+ std::lock_guard<std::mutex> _l(mLock);
+ mFlags = flags;
+ mFormat = format; // could get this from the Sink.
+ mFrames = frames;
+ mSinkSource = std::move(sinksource);
+ mEnabled.store(true);
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
+ }
+
+ void setId(const std::string &id) {
+ std::lock_guard<std::mutex> _l(mLock);
+ mId = id;
+ }
+
+ void dump(int fd, const std::string &reason) {
+ if (!mDataReady.exchange(false)) return;
+ std::string suffix;
+ NBAIO_SinkSource sinkSource;
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ suffix = mId + reason;
+ sinkSource = mSinkSource;
+ }
+ dumpTee(fd, sinkSource, suffix);
+ }
+
+ void write(const void *buffer, size_t frameCount) {
+ if (!mEnabled.load() || frameCount == 0) return;
+ (void)mSinkSource.first->write(buffer, frameCount);
+ mDataReady.store(true);
+ }
+
+ private:
+ // TRICKY: We need to keep the NBAIO_Sink and NBAIO_Source both alive at the same time
+ // because PipeReader holds a naked reference (not a strong or weak pointer) to Pipe.
+ using NBAIO_SinkSource = std::pair<sp<NBAIO_Sink>, sp<NBAIO_Source>>;
+
+ static void dumpTee(int fd, const NBAIO_SinkSource& sinkSource, const std::string& suffix);
+
+ static NBAIO_SinkSource makeSinkSource(
+ const NBAIO_Format &format, size_t frames, bool *enabled);
+
+ // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes
+ static constexpr size_t DEFAULT_TEE_FRAMES = 0x200000;
+
+ // atomic status checking
+ std::atomic<bool> mEnabled{false};
+ std::atomic<bool> mDataReady{false};
+
+ // locked dump information
+ mutable std::mutex mLock;
+ std::string mId; // GUARDED_BY(mLock)
+ TEE_FLAG mFlags = TEE_FLAG_NONE; // GUARDED_BY(mLock)
+ NBAIO_Format mFormat = Format_Invalid; // GUARDED_BY(mLock)
+ size_t mFrames = 0; // GUARDED_BY(mLock)
+ NBAIO_SinkSource mSinkSource; // GUARDED_BY(mLock)
+ };
+
+ /** RunningTees tracks the currently running tees for dump purposes.
+ It is implemented with minimal locked regions so as to be transparent to the caller. */
+ class RunningTees {
+ public:
+ void add(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+ std::lock_guard<std::mutex> _l(mLock);
+ ALOGW_IF(!mTees.emplace(tee).second,
+ "%s: %p already exists in mTees", __func__, tee.get());
+ }
+
+ void remove(const std::shared_ptr<NBAIO_TeeImpl> &tee) {
+ std::lock_guard<std::mutex> _l(mLock);
+ ALOGW_IF(mTees.erase(tee) != 1,
+ "%s: %p doesn't exist in mTees", __func__, tee.get());
+ }
+
+ void dump(int fd, const std::string &reason) {
+ std::vector<std::shared_ptr<NBAIO_TeeImpl>> tees; // safe snapshot of tees
+ {
+ std::lock_guard<std::mutex> _l(mLock);
+ tees.insert(tees.end(), mTees.begin(), mTees.end());
+ }
+ for (const auto &tee : tees) {
+ tee->dump(fd, reason);
+ }
+ }
+
+ private:
+ std::mutex mLock;
+ std::set<std::shared_ptr<NBAIO_TeeImpl>> mTees; // GUARDED_BY(mLock)
+ };
+
+ // singleton
+ static RunningTees &getRunningTees() {
+ static RunningTees runningTees;
+ return runningTees;
+ }
+
+ // The NBAIO_TeeImpl may have a lifetime longer than NBAIO_Tee if
+ // RunningTees::dump() is called simultaneously with ~NBAIO_Tee().
+ // This is allowed for maximum concurrency.
+ const std::shared_ptr<NBAIO_TeeImpl> mTee;
+}; // NBAIO_Tee
+
+} // namespace android
+
+#endif // TEE_SINK
+#endif // !ANDROID_NBAIO_TEE_H
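For reference, a minimal usage sketch of the NBAIO_Tee API declared above, mirroring the FastMixer wiring earlier in this change. It assumes TEE_SINK is enabled in Configuration.h; the owning class, method names, and id string are hypothetical.

    class ExampleOutputThread {               // hypothetical owner, analogous to FastMixer
        NBAIO_Tee mTee;                       // default-constructed; registers with RunningTees
    public:
        void prepare(uint32_t sampleRate, uint32_t channelCount) {
            // Checked against the af.tee property (with ro.debuggable) because a thread flag is passed.
            mTee.set(sampleRate, channelCount, AUDIO_FORMAT_PCM_FLOAT,
                     NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
            mTee.setId("_13_F");              // hypothetical id; becomes part of the dump filename
        }
        void process(const void *buffer, size_t frameCount) {
            mTee.write(buffer, frameCount);   // nonblocking; must not run concurrently with set()
        }
        void dump(int fd) const {
            mTee.dump(fd, "_DUMP");           // emits an aftee_..._DUMP.wav and logs the name to fd
        }
    };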
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index e5cb8a2..15a60c2 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -24,8 +24,9 @@
#include <audio_utils/primitives.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
#include <media/AudioParameter.h>
+#include <media/PatchBuilder.h>
+#include <mediautils/ServiceUtilities.h>
// ----------------------------------------------------------------------------
@@ -49,111 +50,77 @@
struct audio_port *ports)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->listAudioPorts(num_ports, ports);
- }
- return NO_INIT;
+ return mPatchPanel.listAudioPorts(num_ports, ports);
}
/* Get supported attributes for a given audio port */
status_t AudioFlinger::getAudioPort(struct audio_port *port)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->getAudioPort(port);
- }
- return NO_INIT;
+ return mPatchPanel.getAudioPort(port);
}
-
/* Connect a patch between several source and sink ports */
status_t AudioFlinger::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->createAudioPatch(patch, handle);
- }
- return NO_INIT;
+ return mPatchPanel.createAudioPatch(patch, handle);
}
/* Disconnect a patch */
status_t AudioFlinger::releaseAudioPatch(audio_patch_handle_t handle)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->releaseAudioPatch(handle);
- }
- return NO_INIT;
+ return mPatchPanel.releaseAudioPatch(handle);
}
-
/* List connected audio ports and their attributes */
status_t AudioFlinger::listAudioPatches(unsigned int *num_patches,
struct audio_patch *patches)
{
Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->listAudioPatches(num_patches, patches);
+ return mPatchPanel.listAudioPatches(num_patches, patches);
+}
+
+status_t AudioFlinger::PatchPanel::SoftwarePatch::getLatencyMs_l(double *latencyMs) const
+{
+ const auto& iter = mPatchPanel.mPatches.find(mPatchHandle);
+ if (iter != mPatchPanel.mPatches.end()) {
+ return iter->second.getLatencyMs(latencyMs);
+ } else {
+ return BAD_VALUE;
}
- return NO_INIT;
-}
-
-/* Set audio port configuration */
-status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
-{
- Mutex::Autolock _l(mLock);
- if (mPatchPanel != 0) {
- return mPatchPanel->setAudioPortConfig(config);
- }
- return NO_INIT;
-}
-
-
-AudioFlinger::PatchPanel::PatchPanel(const sp<AudioFlinger>& audioFlinger)
- : mAudioFlinger(audioFlinger)
-{
-}
-
-AudioFlinger::PatchPanel::~PatchPanel()
-{
}
/* List connected audio ports and their attributes */
status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
struct audio_port *ports __unused)
{
- ALOGV("listAudioPorts");
+ ALOGV(__func__);
return NO_ERROR;
}
/* Get supported attributes for a given audio port */
status_t AudioFlinger::PatchPanel::getAudioPort(struct audio_port *port __unused)
{
- ALOGV("getAudioPort");
+ ALOGV(__func__);
return NO_ERROR;
}
-
/* Connect a patch between several source and sink ports */
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- status_t status = NO_ERROR;
- audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
if (handle == NULL || patch == NULL) {
return BAD_VALUE;
}
- ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
- patch->num_sources, patch->num_sinks, *handle);
- if (audioflinger == 0) {
- return NO_INIT;
- }
+ ALOGV("%s() num_sources %d num_sinks %d handle %d",
+ __func__, patch->num_sources, patch->num_sinks, *handle);
+ status_t status = NO_ERROR;
+ audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- (patch->num_sinks == 0 && patch->num_sources != 2) ||
- patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
return BAD_VALUE;
}
// limit number of sources to 1 for now or 2 sources for special cross hw module case.
@@ -163,81 +130,73 @@
}
if (*handle != AUDIO_PATCH_HANDLE_NONE) {
- for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
- if (*handle == mPatches[index]->mHandle) {
- ALOGV("createAudioPatch() removing patch handle %d", *handle);
- halHandle = mPatches[index]->mHalHandle;
- Patch *removedPatch = mPatches[index];
- // free resources owned by the removed patch if applicable
- // 1) if a software patch is present, release the playback and capture threads and
- // tracks created. This will also release the corresponding audio HAL patches
- if ((removedPatch->mRecordPatchHandle
- != AUDIO_PATCH_HANDLE_NONE) ||
- (removedPatch->mPlaybackPatchHandle !=
- AUDIO_PATCH_HANDLE_NONE)) {
- clearPatchConnections(removedPatch);
- }
- // 2) if the new patch and old patch source or sink are devices from different
- // hw modules, clear the audio HAL patches now because they will not be updated
- // by call to create_audio_patch() below which will happen on a different HW module
- if (halHandle != AUDIO_PATCH_HANDLE_NONE) {
- audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
- if ((removedPatch->mAudioPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE) &&
- ((patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE) ||
- (removedPatch->mAudioPatch.sources[0].ext.device.hw_module !=
- patch->sources[0].ext.device.hw_module))) {
- hwModule = removedPatch->mAudioPatch.sources[0].ext.device.hw_module;
- } else if ((patch->num_sinks == 0) ||
- ((removedPatch->mAudioPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
- ((patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) ||
- (removedPatch->mAudioPatch.sinks[0].ext.device.hw_module !=
- patch->sinks[0].ext.device.hw_module)))) {
- // Note on (patch->num_sinks == 0): this situation should not happen as
- // these special patches are only created by the policy manager but just
- // in case, systematically clear the HAL patch.
- // Note that removedPatch->mAudioPatch.num_sinks cannot be 0 here because
- // halHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
- hwModule = removedPatch->mAudioPatch.sinks[0].ext.device.hw_module;
- }
- if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
- if (index >= 0) {
- sp<DeviceHalInterface> hwDevice =
- audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
- hwDevice->releaseAudioPatch(halHandle);
- }
- }
- }
- mPatches.removeAt(index);
- delete removedPatch;
- break;
+ auto iter = mPatches.find(*handle);
+ if (iter != mPatches.end()) {
+ ALOGV("%s() removing patch handle %d", __func__, *handle);
+ Patch &removedPatch = iter->second;
+ // free resources owned by the removed patch if applicable
+ // 1) if a software patch is present, release the playback and capture threads and
+ // tracks created. This will also release the corresponding audio HAL patches
+ if (removedPatch.isSoftware()) {
+ removedPatch.clearConnections(this);
}
+ // 2) if the new patch and old patch source or sink are devices from different
+ // hw modules, clear the audio HAL patches now because they will not be updated
+ // by call to create_audio_patch() below which will happen on a different HW module
+ if (removedPatch.mHalHandle != AUDIO_PATCH_HANDLE_NONE) {
+ audio_module_handle_t hwModule = AUDIO_MODULE_HANDLE_NONE;
+ const struct audio_patch &oldPatch = removedPatch.mAudioPatch;
+ if (oldPatch.sources[0].type == AUDIO_PORT_TYPE_DEVICE &&
+ (patch->sources[0].type != AUDIO_PORT_TYPE_DEVICE ||
+ oldPatch.sources[0].ext.device.hw_module !=
+ patch->sources[0].ext.device.hw_module)) {
+ hwModule = oldPatch.sources[0].ext.device.hw_module;
+ } else if (patch->num_sinks == 0 ||
+ (oldPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
+ (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE ||
+ oldPatch.sinks[0].ext.device.hw_module !=
+ patch->sinks[0].ext.device.hw_module))) {
+ // Note on (patch->num_sinks == 0): this situation should not happen as
+ // these special patches are only created by the policy manager but just
+ // in case, systematically clear the HAL patch.
+ // Note that removedPatch.mAudioPatch.num_sinks cannot be 0 here because
+ // removedPatch.mHalHandle would be AUDIO_PATCH_HANDLE_NONE in this case.
+ hwModule = oldPatch.sinks[0].ext.device.hw_module;
+ }
+ sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(hwModule);
+ if (hwDevice != 0) {
+ hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
+ }
+ }
+ mPatches.erase(iter);
+ removeSoftwarePatchFromInsertedModules(*handle);
}
}
- Patch *newPatch = new Patch(patch);
+ Patch newPatch{*patch};
+ audio_module_handle_t insertedModule = AUDIO_MODULE_HANDLE_NONE;
switch (patch->sources[0].type) {
case AUDIO_PORT_TYPE_DEVICE: {
audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+ AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(srcModule);
+ if (!audioHwDevice) {
status = BAD_VALUE;
goto exit;
}
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
for (unsigned int i = 0; i < patch->num_sinks; i++) {
// support only one sink if connection to a mix or across HW modules
if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
- patch->sinks[i].ext.mix.hw_module != srcModule) &&
+ (patch->sinks[i].type == AUDIO_PORT_TYPE_DEVICE &&
+ patch->sinks[i].ext.device.hw_module != srcModule)) &&
patch->num_sinks > 1) {
+ ALOGW("%s() multiple sinks for mix or across modules not supported", __func__);
status = INVALID_OPERATION;
goto exit;
}
// reject connection to different sink types
if (patch->sinks[i].type != patch->sinks[0].type) {
- ALOGW("createAudioPatch() different sink types in same patch not supported");
+ ALOGW("%s() different sink types in same patch not supported", __func__);
status = BAD_VALUE;
goto exit;
}
@@ -256,38 +215,52 @@
if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
(patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
patch->sources[1].ext.mix.hw_module)) {
- ALOGW("createAudioPatch() invalid source combination");
+ ALOGW("%s() invalid source combination", __func__);
status = INVALID_OPERATION;
goto exit;
}
sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
- newPatch->mPlaybackThread = (MixerThread *)thread.get();
+ mAudioFlinger.checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() cannot get playback thread");
+ ALOGW("%s() cannot get playback thread", __func__);
status = INVALID_OPERATION;
goto exit;
}
+ // existing playback thread is reused, so it is not closed when patch is cleared
+ newPatch.mPlayback.setThread(
+ reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
audio_devices_t device = patch->sinks[0].ext.device.type;
String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- sp<ThreadBase> thread = audioflinger->openOutput_l(
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+ config.sample_rate = patch->sinks[0].sample_rate;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+ config.channel_mask = patch->sinks[0].channel_mask;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+ config.format = patch->sinks[0].format;
+ }
+ if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ flags = patch->sinks[0].flags.output;
+ }
+ sp<ThreadBase> thread = mAudioFlinger.openOutput_l(
patch->sinks[0].ext.device.hw_module,
&output,
&config,
device,
address,
- AUDIO_OUTPUT_FLAG_NONE);
- newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
- ALOGV("audioflinger->openOutput_l() returned %p",
- newPatch->mPlaybackThread.get());
- if (newPatch->mPlaybackThread == 0) {
+ flags);
+ ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
+ if (thread == 0) {
status = NO_MEMORY;
goto exit;
}
+ newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
}
audio_devices_t device = patch->sources[0].ext.device.type;
String8 address = String8(patch->sources[0].ext.device.address);
@@ -297,47 +270,53 @@
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
config.sample_rate = patch->sources[0].sample_rate;
} else {
- config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+ config.sample_rate = newPatch.mPlayback.thread()->sampleRate();
}
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
config.channel_mask = patch->sources[0].channel_mask;
} else {
- config.channel_mask =
- audio_channel_in_mask_from_count(newPatch->mPlaybackThread->channelCount());
+ config.channel_mask = audio_channel_in_mask_from_count(
+ newPatch.mPlayback.thread()->channelCount());
}
if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
config.format = patch->sources[0].format;
} else {
- config.format = newPatch->mPlaybackThread->format();
+ config.format = newPatch.mPlayback.thread()->format();
}
+ audio_input_flags_t flags =
+ patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ patch->sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
+ sp<ThreadBase> thread = mAudioFlinger.openInput_l(srcModule,
&input,
&config,
device,
address,
AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
- newPatch->mRecordThread = (RecordThread *)thread.get();
- ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
- newPatch->mRecordThread.get(), config.channel_mask);
- if (newPatch->mRecordThread == 0) {
+ flags);
+ ALOGV("mAudioFlinger.openInput_l() returned %p inChannelMask %08x",
+ thread.get(), config.channel_mask);
+ if (thread == 0) {
status = NO_MEMORY;
goto exit;
}
- status = createPatchConnections(newPatch, patch);
+ newPatch.mRecord.setThread(reinterpret_cast<RecordThread*>(thread.get()));
+ status = newPatch.createConnections(this);
if (status != NO_ERROR) {
goto exit;
}
+ if (audioHwDevice->isInsert()) {
+ insertedModule = audioHwDevice->handle();
+ }
} else {
if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
- sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+ sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
+ ALOGW("%s() bad capture I/O handle %d",
+ __func__, patch->sinks[0].ext.mix.handle);
status = BAD_VALUE;
goto exit;
}
@@ -356,9 +335,9 @@
} break;
case AUDIO_PORT_TYPE_MIX: {
audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
+ ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(srcModule);
if (index < 0) {
- ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+ ALOGW("%s() bad src hw module %d", __func__, srcModule);
status = BAD_VALUE;
goto exit;
}
@@ -366,8 +345,8 @@
audio_devices_t type = AUDIO_DEVICE_NONE;
for (unsigned int i = 0; i < patch->num_sinks; i++) {
if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
- ALOGW("createAudioPatch() invalid sink type %d for mix source",
- patch->sinks[i].type);
+ ALOGW("%s() invalid sink type %d for mix source",
+ __func__, patch->sinks[i].type);
status = BAD_VALUE;
goto exit;
}
@@ -379,21 +358,21 @@
type |= patch->sinks[i].ext.device.type;
}
sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+ mAudioFlinger.checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
+ ALOGW("%s() bad playback I/O handle %d",
+ __func__, patch->sources[0].ext.mix.handle);
status = BAD_VALUE;
goto exit;
}
}
- if (thread == audioflinger->primaryPlaybackThread_l()) {
+ if (thread == mAudioFlinger.primaryPlaybackThread_l()) {
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyRouting), (int)type);
- audioflinger->broacastParametersToRecordThreads_l(param.toString());
+ mAudioFlinger.broacastParametersToRecordThreads_l(param.toString());
}
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
@@ -403,295 +382,363 @@
goto exit;
}
exit:
- ALOGV("createAudioPatch() status %d", status);
+ ALOGV("%s() status %d", __func__, status);
if (status == NO_ERROR) {
- *handle = (audio_patch_handle_t) audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
- newPatch->mHandle = *handle;
- newPatch->mHalHandle = halHandle;
- mPatches.add(newPatch);
- ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle);
+ *handle = (audio_patch_handle_t) mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
+ newPatch.mHalHandle = halHandle;
+ mPatches.insert(std::make_pair(*handle, std::move(newPatch)));
+ if (insertedModule != AUDIO_MODULE_HANDLE_NONE) {
+ addSoftwarePatchToInsertedModules(insertedModule, *handle);
+ }
+ ALOGV("%s() added new patch handle %d halHandle %d", __func__, *handle, halHandle);
} else {
- clearPatchConnections(newPatch);
- delete newPatch;
+ newPatch.clearConnections(this);
}
return status;
}
-status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
- const struct audio_patch *audioPatch)
+AudioFlinger::PatchPanel::Patch::~Patch()
+{
+ ALOGE_IF(isSoftware(), "Software patch connections leaked %d %d",
+ mRecord.handle(), mPlayback.handle());
+}
+
+status_t AudioFlinger::PatchPanel::Patch::createConnections(PatchPanel *panel)
{
// create patch from source device to record thread input
- struct audio_patch subPatch;
- subPatch.num_sources = 1;
- subPatch.sources[0] = audioPatch->sources[0];
- subPatch.num_sinks = 1;
-
- patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]);
- subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC;
-
- status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle);
+ status_t status = panel->createAudioPatch(
+ PatchBuilder().addSource(mAudioPatch.sources[0]).
+ addSink(mRecord.thread(), { .source = AUDIO_SOURCE_MIC }).patch(),
+ mRecord.handlePtr());
if (status != NO_ERROR) {
- patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mRecord.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
return status;
}
// create patch from playback thread output to sink device
- if (audioPatch->num_sinks != 0) {
- patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
- subPatch.sinks[0] = audioPatch->sinks[0];
- status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+ if (mAudioPatch.num_sinks != 0) {
+ status = panel->createAudioPatch(
+ PatchBuilder().addSource(mPlayback.thread()).addSink(mAudioPatch.sinks[0]).patch(),
+ mPlayback.handlePtr());
if (status != NO_ERROR) {
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
return status;
}
} else {
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ *mPlayback.handlePtr() = AUDIO_PATCH_HANDLE_NONE;
}
// use a pseudo LCM between input and output framecount
- size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
+ size_t playbackFrameCount = mPlayback.thread()->frameCount();
int playbackShift = __builtin_ctz(playbackFrameCount);
- size_t recordFramecount = patch->mRecordThread->frameCount();
+ size_t recordFramecount = mRecord.thread()->frameCount();
int shift = __builtin_ctz(recordFramecount);
if (playbackShift < shift) {
shift = playbackShift;
}
size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
- ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
- playbackFrameCount, recordFramecount, frameCount);
+ ALOGV("%s() playframeCount %zu recordFramecount %zu frameCount %zu",
+ __func__, playbackFrameCount, recordFramecount, frameCount);
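+    // Worked example (illustrative): playbackFrameCount = 1024 (ctz 10) and
+    // recordFramecount = 768 (ctz 8) give shift = 8, so
+    // frameCount = (1024 * 768) >> 8 = 3072, i.e. LCM(1024, 768).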
// create a special record track to capture from record thread
- uint32_t channelCount = patch->mPlaybackThread->channelCount();
+ uint32_t channelCount = mPlayback.thread()->channelCount();
audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
- audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
- uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
- audio_format_t format = patch->mPlaybackThread->format();
+ audio_channel_mask_t outChannelMask = mPlayback.thread()->channelMask();
+ uint32_t sampleRate = mPlayback.thread()->sampleRate();
+ audio_format_t format = mPlayback.thread()->format();
- patch->mPatchRecord = new RecordThread::PatchRecord(
- patch->mRecordThread.get(),
+ audio_format_t inputFormat = mRecord.thread()->format();
+ if (!audio_is_linear_pcm(inputFormat)) {
+ // The playbackThread format will say PCM for IEC61937 packetized stream.
+ // Use recordThread format.
+ format = inputFormat;
+ }
+ audio_input_flags_t inputFlags = mAudioPatch.sources[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ mAudioPatch.sources[0].flags.input : AUDIO_INPUT_FLAG_NONE;
+ sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
+ mRecord.thread().get(),
sampleRate,
inChannelMask,
format,
frameCount,
NULL,
(size_t)0 /* bufferSize */,
- AUDIO_INPUT_FLAG_NONE);
- if (patch->mPatchRecord == 0) {
- return NO_MEMORY;
- }
- status = patch->mPatchRecord->initCheck();
+ inputFlags);
+ status = mRecord.checkTrack(tempRecordTrack.get());
if (status != NO_ERROR) {
return status;
}
- patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
+
+ audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
+ mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
// create a special playback track to render to playback thread.
// this track is given the same buffer as the PatchRecord buffer
- patch->mPatchTrack = new PlaybackThread::PatchTrack(
- patch->mPlaybackThread.get(),
- audioPatch->sources[1].ext.mix.usecase.stream,
+ sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
+ mPlayback.thread().get(),
+ mAudioPatch.sources[1].ext.mix.usecase.stream,
sampleRate,
outChannelMask,
format,
frameCount,
- patch->mPatchRecord->buffer(),
- patch->mPatchRecord->bufferSize(),
- AUDIO_OUTPUT_FLAG_NONE);
- status = patch->mPatchTrack->initCheck();
+ tempRecordTrack->buffer(),
+ tempRecordTrack->bufferSize(),
+ outputFlags);
+ status = mPlayback.checkTrack(tempPatchTrack.get());
if (status != NO_ERROR) {
return status;
}
- patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
// tie playback and record tracks together
- patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
- patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
+ mRecord.setTrackAndPeer(tempRecordTrack, tempPatchTrack.get());
+ mPlayback.setTrackAndPeer(tempPatchTrack, tempRecordTrack.get());
// start capture and playback
- patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
- patch->mPatchTrack->start();
+ mRecord.track()->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
+ mPlayback.track()->start();
return status;
}
-void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch)
+void AudioFlinger::PatchPanel::Patch::clearConnections(PatchPanel *panel)
{
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return;
- }
+ ALOGV("%s() mRecord.handle %d mPlayback.handle %d",
+ __func__, mRecord.handle(), mPlayback.handle());
+ mRecord.stopTrack();
+ mPlayback.stopTrack();
+ mRecord.closeConnections(panel);
+ mPlayback.closeConnections(panel);
+}
- ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d",
- patch->mRecordPatchHandle, patch->mPlaybackPatchHandle);
+status_t AudioFlinger::PatchPanel::Patch::getLatencyMs(double *latencyMs) const
+{
+ if (!isSoftware()) return INVALID_OPERATION;
- if (patch->mPatchRecord != 0) {
- patch->mPatchRecord->stop();
- }
- if (patch->mPatchTrack != 0) {
- patch->mPatchTrack->stop();
- }
- if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- releaseAudioPatch(patch->mRecordPatchHandle);
- patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- }
- if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- releaseAudioPatch(patch->mPlaybackPatchHandle);
- patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- }
- if (patch->mRecordThread != 0) {
- if (patch->mPatchRecord != 0) {
- patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
- }
- audioflinger->closeInputInternal_l(patch->mRecordThread);
- }
- if (patch->mPlaybackThread != 0) {
- if (patch->mPatchTrack != 0) {
- patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
- }
- // if num sources == 2 we are reusing an existing playback thread so we do not close it
- if (patch->mAudioPatch.num_sources != 2) {
- audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
- }
- }
- if (patch->mRecordThread != 0) {
- if (patch->mPatchRecord != 0) {
- patch->mPatchRecord.clear();
- }
- patch->mRecordThread.clear();
- }
- if (patch->mPlaybackThread != 0) {
- if (patch->mPatchTrack != 0) {
- patch->mPatchTrack.clear();
- }
- patch->mPlaybackThread.clear();
- }
+ auto recordTrack = mRecord.const_track();
+ if (recordTrack.get() == nullptr) return INVALID_OPERATION;
+ auto playbackTrack = mPlayback.const_track();
+ if (playbackTrack.get() == nullptr) return INVALID_OPERATION;
+
+ // Latency information for tracks may be queried without obtaining
+ // the underlying thread lock.
+ //
+ // We use record server latency + playback track latency (generally smaller than the
+ // reverse due to internal biases).
+ //
+ // TODO: is this stable enough? Consider a PatchTrack synchronized version of this.
+ double recordServerLatencyMs;
+ if (recordTrack->getServerLatencyMs(&recordServerLatencyMs) != OK) return INVALID_OPERATION;
+
+ double playbackTrackLatencyMs;
+ if (playbackTrack->getTrackLatencyMs(&playbackTrackLatencyMs) != OK) return INVALID_OPERATION;
+
+ *latencyMs = recordServerLatencyMs + playbackTrackLatencyMs;
+ return OK;
+}
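A minimal standalone sketch of the latency composition performed in getLatencyMs() above: the record-side server latency and the playback-side track latency are fetched independently and summed, with any failure propagated. The getter functions here are hypothetical stand-ins, not AudioFlinger APIs.

#include <cstdio>

// Hypothetical stand-ins mirroring how getServerLatencyMs() and
// getTrackLatencyMs() are used above: return 0 on success, fill *latencyMs.
static int getRecordServerLatencyMs(double *latencyMs) { *latencyMs = 12.5; return 0; }
static int getPlaybackTrackLatencyMs(double *latencyMs) { *latencyMs = 21.0; return 0; }

// End-to-end software patch latency: record capture path + playback render path.
static int getPatchLatencyMs(double *latencyMs) {
    double recordMs, playbackMs;
    if (getRecordServerLatencyMs(&recordMs) != 0) return -1;      // propagate failure
    if (getPlaybackTrackLatencyMs(&playbackMs) != 0) return -1;
    *latencyMs = recordMs + playbackMs;
    return 0;
}

int main() {
    double ms;
    if (getPatchLatencyMs(&ms) == 0) {
        printf("patch latency: %.2f ms\n", ms);  // 33.50 with the stub values
    }
    return 0;
}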
+
+String8 AudioFlinger::PatchPanel::Patch::dump(audio_patch_handle_t myHandle) const
+{
+ // TODO: Consider table dump form for patches, just like tracks.
+ String8 result = String8::format("Patch %d: thread %p => thread %p",
+ myHandle, mRecord.const_thread().get(), mPlayback.const_thread().get());
+
+ // add latency if it exists
+ double latencyMs;
+ if (getLatencyMs(&latencyMs) == OK) {
+ result.appendFormat(" latency: %.2lf", latencyMs);
+ }
+ return result;
}
/* Disconnect a patch */
status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle)
{
- ALOGV("releaseAudioPatch handle %d", handle);
+ ALOGV("%s handle %d", __func__, handle);
status_t status = NO_ERROR;
- size_t index;
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return NO_INIT;
- }
-
- for (index = 0; index < mPatches.size(); index++) {
- if (handle == mPatches[index]->mHandle) {
- break;
- }
- }
- if (index == mPatches.size()) {
+ auto iter = mPatches.find(handle);
+ if (iter == mPatches.end()) {
return BAD_VALUE;
}
- Patch *removedPatch = mPatches[index];
- mPatches.removeAt(index);
+ Patch &removedPatch = iter->second;
+ const struct audio_patch &patch = removedPatch.mAudioPatch;
- struct audio_patch *patch = &removedPatch->mAudioPatch;
-
- switch (patch->sources[0].type) {
+ const struct audio_port_config &src = patch.sources[0];
+ switch (src.type) {
case AUDIO_PORT_TYPE_DEVICE: {
- audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+ sp<DeviceHalInterface> hwDevice = findHwDeviceByModule(src.ext.device.hw_module);
+ if (hwDevice == 0) {
+ ALOGW("%s() bad src hw module %d", __func__, src.ext.device.hw_module);
status = BAD_VALUE;
break;
}
- if (removedPatch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE ||
- removedPatch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
- clearPatchConnections(removedPatch);
+ if (removedPatch.isSoftware()) {
+ removedPatch.clearConnections(this);
break;
}
- if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
- sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
- patch->sinks[0].ext.mix.handle);
+ if (patch.sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+ audio_io_handle_t ioHandle = patch.sinks[0].ext.mix.handle;
+ sp<ThreadBase> thread = mAudioFlinger.checkRecordThread_l(ioHandle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(ioHandle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
+ ALOGW("%s() bad capture I/O handle %d", __func__, ioHandle);
status = BAD_VALUE;
break;
}
}
- status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
} else {
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
+ status = hwDevice->releaseAudioPatch(removedPatch.mHalHandle);
}
} break;
case AUDIO_PORT_TYPE_MIX: {
- audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module;
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
- if (index < 0) {
- ALOGW("releaseAudioPatch() bad src hw module %d", srcModule);
+ if (findHwDeviceByModule(src.ext.mix.hw_module) == 0) {
+ ALOGW("%s() bad src hw module %d", __func__, src.ext.mix.hw_module);
status = BAD_VALUE;
break;
}
- sp<ThreadBase> thread =
- audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
+ audio_io_handle_t ioHandle = src.ext.mix.handle;
+ sp<ThreadBase> thread = mAudioFlinger.checkPlaybackThread_l(ioHandle);
if (thread == 0) {
- thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ thread = mAudioFlinger.checkMmapThread_l(ioHandle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
+ ALOGW("%s() bad playback I/O handle %d", __func__, ioHandle);
status = BAD_VALUE;
break;
}
}
- status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch.mHalHandle);
} break;
default:
status = BAD_VALUE;
- break;
}
- delete removedPatch;
+ mPatches.erase(iter);
+ removeSoftwarePatchFromInsertedModules(handle);
return status;
}
-
/* List connected audio ports and their attributes */
status_t AudioFlinger::PatchPanel::listAudioPatches(unsigned int *num_patches __unused,
struct audio_patch *patches __unused)
{
- ALOGV("listAudioPatches");
+ ALOGV(__func__);
return NO_ERROR;
}
-/* Set audio port configuration */
-status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
+status_t AudioFlinger::PatchPanel::getDownstreamSoftwarePatches(
+ audio_io_handle_t stream,
+ std::vector<AudioFlinger::PatchPanel::SoftwarePatch> *patches) const
{
- ALOGV("setAudioPortConfig");
-
- sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
- if (audioflinger == 0) {
- return NO_INIT;
+ for (const auto& module : mInsertedModules) {
+ if (module.second.streams.count(stream)) {
+ for (const auto& patchHandle : module.second.sw_patches) {
+ const auto& patch_iter = mPatches.find(patchHandle);
+ if (patch_iter != mPatches.end()) {
+ const Patch &patch = patch_iter->second;
+ patches->emplace_back(*this, patchHandle,
+ patch.mPlayback.const_thread()->id(),
+ patch.mRecord.const_thread()->id());
+ } else {
+ ALOGE("Stale patch handle in the cache: %d", patchHandle);
+ }
+ }
+ return OK;
+ }
}
+ // The stream is not associated with any of the inserted modules.
+ return BAD_VALUE;
+}
- audio_module_handle_t module;
- if (config->type == AUDIO_PORT_TYPE_DEVICE) {
- module = config->ext.device.hw_module;
- } else {
- module = config->ext.mix.hw_module;
+void AudioFlinger::PatchPanel::notifyStreamOpened(
+ AudioHwDevice *audioHwDevice, audio_io_handle_t stream)
+{
+ if (audioHwDevice->isInsert()) {
+ mInsertedModules[audioHwDevice->handle()].streams.insert(stream);
}
+}
- ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(module);
+void AudioFlinger::PatchPanel::notifyStreamClosed(audio_io_handle_t stream)
+{
+ for (auto& module : mInsertedModules) {
+ module.second.streams.erase(stream);
+ }
+}
+
+AudioHwDevice* AudioFlinger::PatchPanel::findAudioHwDeviceByModule(audio_module_handle_t module)
+{
+ if (module == AUDIO_MODULE_HANDLE_NONE) return nullptr;
+ ssize_t index = mAudioFlinger.mAudioHwDevs.indexOfKey(module);
if (index < 0) {
- ALOGW("setAudioPortConfig() bad hw module %d", module);
- return BAD_VALUE;
+ ALOGW("%s() bad hw module %d", __func__, module);
+ return nullptr;
+ }
+ return mAudioFlinger.mAudioHwDevs.valueAt(index);
+}
+
+sp<DeviceHalInterface> AudioFlinger::PatchPanel::findHwDeviceByModule(audio_module_handle_t module)
+{
+ AudioHwDevice *audioHwDevice = findAudioHwDeviceByModule(module);
+ return audioHwDevice ? audioHwDevice->hwDevice() : nullptr;
+}
+
+void AudioFlinger::PatchPanel::addSoftwarePatchToInsertedModules(
+ audio_module_handle_t module, audio_patch_handle_t handle)
+{
+ mInsertedModules[module].sw_patches.insert(handle);
+}
+
+void AudioFlinger::PatchPanel::removeSoftwarePatchFromInsertedModules(
+ audio_patch_handle_t handle)
+{
+ for (auto& module : mInsertedModules) {
+ module.second.sw_patches.erase(handle);
+ }
+}
+
+void AudioFlinger::PatchPanel::dump(int fd) const
+{
+ String8 patchPanelDump;
+ const char *indent = " ";
+
+ // Only dump software patches.
+ bool headerPrinted = false;
+ for (const auto& iter : mPatches) {
+ if (iter.second.isSoftware()) {
+ if (!headerPrinted) {
+ patchPanelDump += "\nSoftware patches:\n";
+ headerPrinted = true;
+ }
+ patchPanelDump.appendFormat("%s%s\n", indent, iter.second.dump(iter.first).string());
+ }
}
- AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- return audioHwDevice->hwDevice()->setAudioPortConfig(config);
+ headerPrinted = false;
+ for (const auto& module : mInsertedModules) {
+ if (!module.second.streams.empty() || !module.second.sw_patches.empty()) {
+ if (!headerPrinted) {
+ patchPanelDump += "\nTracked inserted modules:\n";
+ headerPrinted = true;
+ }
+ String8 moduleDump = String8::format("Module %d: I/O handles: ", module.first);
+ for (const auto& stream : module.second.streams) {
+ moduleDump.appendFormat("%d ", stream);
+ }
+ moduleDump.append("; SW Patches: ");
+ for (const auto& patch : module.second.sw_patches) {
+ moduleDump.appendFormat("%d ", patch);
+ }
+ patchPanelDump.appendFormat("%s%s\n", indent, moduleDump.string());
+ }
+ }
+
+ if (!patchPanelDump.isEmpty()) {
+ write(fd, patchPanelDump.string(), patchPanelDump.size());
+ }
}
} // namespace android
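The releaseAudioPatch() change above replaces a linear scan over a SortedVector of raw Patch pointers with a lookup in a handle-keyed std::map of value-type patches. A minimal sketch of that registry pattern, with PatchHandle and FakePatch as illustrative stand-ins rather than AudioFlinger types:

#include <cstdio>
#include <map>
#include <string>

using PatchHandle = int;

struct FakePatch {
    std::string description;
};

class Registry {
public:
    void add(PatchHandle handle, FakePatch patch) {
        mPatches.emplace(handle, std::move(patch));
    }
    // Returns false for an unknown handle, mirroring the BAD_VALUE path above.
    bool release(PatchHandle handle) {
        auto iter = mPatches.find(handle);
        if (iter == mPatches.end()) return false;
        // Tear-down work would use iter->second here; erase() then drops ownership,
        // so there is no separate delete as with the old raw-pointer vector.
        mPatches.erase(iter);
        return true;
    }
private:
    std::map<PatchHandle, FakePatch> mPatches;
};

int main() {
    Registry reg;
    reg.add(42, {"speaker patch"});
    printf("release 42: %s\n", reg.release(42) ? "ok" : "bad value");
    printf("release 42 again: %s\n", reg.release(42) ? "ok" : "bad value");
    return 0;
}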
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index d37c0d3..269a398 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -19,13 +19,32 @@
#error This header file should only be included from AudioFlinger.h
#endif
-class PatchPanel : public RefBase {
+
+// PatchPanel is concealed within AudioFlinger; their lifetimes are the same.
+class PatchPanel {
public:
+ class SoftwarePatch {
+ public:
+ SoftwarePatch(const PatchPanel &patchPanel, audio_patch_handle_t patchHandle,
+ audio_io_handle_t playbackThreadHandle, audio_io_handle_t recordThreadHandle)
+ : mPatchPanel(patchPanel), mPatchHandle(patchHandle),
+ mPlaybackThreadHandle(playbackThreadHandle),
+ mRecordThreadHandle(recordThreadHandle) {}
+ SoftwarePatch(const SoftwarePatch&) = default;
+ SoftwarePatch& operator=(const SoftwarePatch&) = default;
- class Patch;
+ // Must be called under AudioFlinger::mLock
+ status_t getLatencyMs_l(double *latencyMs) const;
+ audio_io_handle_t getPlaybackThreadHandle() const { return mPlaybackThreadHandle; };
+ audio_io_handle_t getRecordThreadHandle() const { return mRecordThreadHandle; };
+ private:
+ const PatchPanel &mPatchPanel;
+ const audio_patch_handle_t mPatchHandle;
+ const audio_io_handle_t mPlaybackThreadHandle;
+ const audio_io_handle_t mRecordThreadHandle;
+ };
- explicit PatchPanel(const sp<AudioFlinger>& audioFlinger);
- virtual ~PatchPanel();
+ explicit PatchPanel(AudioFlinger* audioFlinger) : mAudioFlinger(*audioFlinger) {}
/* List connected audio ports and their attributes */
status_t listAudioPorts(unsigned int *num_ports,
@@ -45,46 +64,144 @@
status_t listAudioPatches(unsigned int *num_patches,
struct audio_patch *patches);
- /* Set audio port configuration */
- status_t setAudioPortConfig(const struct audio_port_config *config);
+ // Retrieves all currently established software patches for a stream
+ // opened on an intermediate module.
+ status_t getDownstreamSoftwarePatches(audio_io_handle_t stream,
+ std::vector<SoftwarePatch> *patches) const;
- status_t createPatchConnections(Patch *patch,
- const struct audio_patch *audioPatch);
- void clearPatchConnections(Patch *patch);
+ // Notifies patch panel about all opened and closed streams.
+ void notifyStreamOpened(AudioHwDevice *audioHwDevice, audio_io_handle_t stream);
+ void notifyStreamClosed(audio_io_handle_t stream);
+
+ void dump(int fd) const;
+
+private:
+ template<typename ThreadType, typename TrackType>
+ class Endpoint {
+ public:
+ Endpoint() = default;
+ Endpoint(Endpoint&& other) { *this = std::move(other); }
+ Endpoint& operator=(Endpoint&& other) {
+ ALOGE_IF(mHandle != AUDIO_PATCH_HANDLE_NONE,
+ "A non empty Patch Endpoint leaked, handle %d", mHandle);
+ *this = other;
+ other.mHandle = AUDIO_PATCH_HANDLE_NONE;
+ return *this;
+ }
+
+ status_t checkTrack(TrackType *trackOrNull) const {
+ if (trackOrNull == nullptr) return NO_MEMORY;
+ return trackOrNull->initCheck();
+ }
+ audio_patch_handle_t handle() const { return mHandle; }
+ sp<ThreadType> thread() { return mThread; }
+ sp<TrackType> track() { return mTrack; }
+ sp<const ThreadType> const_thread() const { return mThread; }
+ sp<const TrackType> const_track() const { return mTrack; }
+
+ void closeConnections(PatchPanel *panel) {
+ if (mHandle != AUDIO_PATCH_HANDLE_NONE) {
+ panel->releaseAudioPatch(mHandle);
+ mHandle = AUDIO_PATCH_HANDLE_NONE;
+ }
+ if (mThread != 0) {
+ if (mTrack != 0) {
+ mThread->deletePatchTrack(mTrack);
+ }
+ if (mCloseThread) {
+ panel->mAudioFlinger.closeThreadInternal_l(mThread);
+ }
+ }
+ }
+ audio_patch_handle_t* handlePtr() { return &mHandle; }
+ void setThread(const sp<ThreadType>& thread, bool closeThread = true) {
+ mThread = thread;
+ mCloseThread = closeThread;
+ }
+ void setTrackAndPeer(const sp<TrackType>& track,
+ ThreadBase::PatchProxyBufferProvider *peer) {
+ mTrack = track;
+ mThread->addPatchTrack(mTrack);
+ mTrack->setPeerProxy(peer);
+ }
+ void stopTrack() { if (mTrack) mTrack->stop(); }
+
+ private:
+ Endpoint(const Endpoint&) = default;
+ Endpoint& operator=(const Endpoint&) = default;
+
+ sp<ThreadType> mThread;
+ bool mCloseThread = true;
+ audio_patch_handle_t mHandle = AUDIO_PATCH_HANDLE_NONE;
+ sp<TrackType> mTrack;
+ };
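The Endpoint template above is move-only: moving an endpoint transfers the patch handle and empties the source, and overwriting a non-empty endpoint logs a leak warning. A self-contained sketch of that idiom, assuming a plain int handle where 0 plays the role of AUDIO_PATCH_HANDLE_NONE:

#include <cstdio>
#include <utility>

class MovableHandle {
public:
    MovableHandle() = default;
    explicit MovableHandle(int handle) : mHandle(handle) {}
    MovableHandle(MovableHandle&& other) { *this = std::move(other); }
    MovableHandle& operator=(MovableHandle&& other) {
        if (mHandle != 0) {
            fprintf(stderr, "warning: overwriting a non-empty handle %d\n", mHandle);
        }
        mHandle = other.mHandle;
        other.mHandle = 0;   // the moved-from endpoint no longer owns the handle
        return *this;
    }
    MovableHandle(const MovableHandle&) = delete;
    MovableHandle& operator=(const MovableHandle&) = delete;
    int handle() const { return mHandle; }
private:
    int mHandle = 0;
};

int main() {
    MovableHandle a(7);
    MovableHandle b = std::move(a);   // b now owns 7, a is empty
    printf("a=%d b=%d\n", a.handle(), b.handle());
    return 0;
}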
class Patch {
public:
- explicit Patch(const struct audio_patch *patch) :
- mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE),
- mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE),
- mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {}
- ~Patch() {}
+ explicit Patch(const struct audio_patch &patch) : mAudioPatch(patch) {}
+ ~Patch();
+ Patch(const Patch&) = delete;
+ Patch(Patch&&) = default;
+ Patch& operator=(const Patch&) = delete;
+ Patch& operator=(Patch&&) = default;
+ status_t createConnections(PatchPanel *panel);
+ void clearConnections(PatchPanel *panel);
+ bool isSoftware() const {
+ return mRecord.handle() != AUDIO_PATCH_HANDLE_NONE ||
+ mPlayback.handle() != AUDIO_PATCH_HANDLE_NONE; }
+
+ // returns the latency of the patch (from record to playback).
+ status_t getLatencyMs(double *latencyMs) const;
+
+ String8 dump(audio_patch_handle_t myHandle) const;
+
+ // Note that audio_patch::id is only unique within a HAL module
struct audio_patch mAudioPatch;
- audio_patch_handle_t mHandle;
// handle for audio HAL patch handle present only when the audio HAL version is >= 3.0
- audio_patch_handle_t mHalHandle;
+ audio_patch_handle_t mHalHandle = AUDIO_PATCH_HANDLE_NONE;
// below members are used by a software audio patch connecting a source device from a
// given audio HW module to a sink device on another audio HW module.
- // playback thread created by createAudioPatch() and released by clearPatchConnections() if
- // no existing playback thread can be used by the software patch
- sp<PlaybackThread> mPlaybackThread;
- // audio track created by createPatchConnections() and released by clearPatchConnections()
- sp<PlaybackThread::PatchTrack> mPatchTrack;
- // record thread created by createAudioPatch() and released by clearPatchConnections()
- sp<RecordThread> mRecordThread;
- // audio record created by createPatchConnections() and released by clearPatchConnections()
- sp<RecordThread::PatchRecord> mPatchRecord;
- // handle for audio patch connecting source device to record thread input.
- // created by createPatchConnections() and released by clearPatchConnections()
- audio_patch_handle_t mRecordPatchHandle;
- // handle for audio patch connecting playback thread output to sink device
- // created by createPatchConnections() and released by clearPatchConnections()
- audio_patch_handle_t mPlaybackPatchHandle;
-
+ // the objects are created by createConnections() and released by clearConnections()
+ // playback thread is created if no existing playback thread can be used
+ // connects playback thread output to sink device
+ Endpoint<PlaybackThread, PlaybackThread::PatchTrack> mPlayback;
+ // connects source device to record thread input
+ Endpoint<RecordThread, RecordThread::PatchRecord> mRecord;
};
-private:
- const wp<AudioFlinger> mAudioFlinger;
- SortedVector <Patch *> mPatches;
+ AudioHwDevice* findAudioHwDeviceByModule(audio_module_handle_t module);
+ sp<DeviceHalInterface> findHwDeviceByModule(audio_module_handle_t module);
+ void addSoftwarePatchToInsertedModules(
+ audio_module_handle_t module, audio_patch_handle_t handle);
+ void removeSoftwarePatchFromInsertedModules(audio_patch_handle_t handle);
+
+ AudioFlinger &mAudioFlinger;
+ std::map<audio_patch_handle_t, Patch> mPatches;
+
+ // This map allows going from a thread to "downstream" software patches
+ // when a processing module is inserted in between. Example:
+ //
+ // from map value.streams map key
+ // [Mixer thread] --> [Virtual output device] --> [Processing module] ---\
+ // [Hardware module] <-- [Physical output device] <-- [S/W Patch] <--/
+ // from map value.sw_patches
+ //
+ // This allows the mixer thread to look up the threads of the software patch
+ // for propagating timing info, parameters, etc.
+ //
+ // The current assumptions are:
+ // 1) The processing module acts as a mixer with several outputs which
+ // represent differently downmixed and / or encoded versions of the same
+ // mixed stream. There is no 1:1 correspondence between the input streams
+ // and the software patches, but rather a N:N correspondence between
+ // a group of streams and a group of patches.
+ // 2) There are only a couple of inserted processing modules in the system,
+ // so when looking for a stream or patch handle we can iterate over
+ // all modules.
+ struct ModuleConnections {
+ std::set<audio_io_handle_t> streams;
+ std::set<audio_patch_handle_t> sw_patches;
+ };
+ std::map<audio_module_handle_t, ModuleConnections> mInsertedModules;
};
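The mInsertedModules map above resolves a stream opened on an inserted processing module to the software patches created for that module. A minimal sketch of that two-level lookup, with IoHandle/ModuleHandle/PatchHandle as illustrative aliases rather than the real audio types:

#include <cstdio>
#include <map>
#include <set>
#include <vector>

using IoHandle = int;
using ModuleHandle = int;
using PatchHandle = int;

struct ModuleConnections {
    std::set<IoHandle> streams;
    std::set<PatchHandle> sw_patches;
};

// Walks the (small) module map and returns the patch handles of the module
// the stream was opened on; false if the stream is not on any inserted module.
static bool downstreamPatches(const std::map<ModuleHandle, ModuleConnections>& modules,
                              IoHandle stream, std::vector<PatchHandle>* out) {
    for (const auto& module : modules) {
        if (module.second.streams.count(stream)) {
            out->assign(module.second.sw_patches.begin(), module.second.sw_patches.end());
            return true;
        }
    }
    return false;
}

int main() {
    std::map<ModuleHandle, ModuleConnections> modules;
    modules[1].streams = {10, 11};
    modules[1].sw_patches = {100, 101};
    std::vector<PatchHandle> patches;
    if (downstreamPatches(modules, 10, &patches)) {
        for (PatchHandle p : patches) printf("patch %d\n", p);
    }
    return 0;
}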
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a78be99..4d5f6b0 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -41,7 +41,7 @@
virtual ~Track();
virtual status_t initCheck() const;
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
@@ -56,6 +56,12 @@
LOG_ALWAYS_FATAL_IF(mName >= 0 && name >= 0,
"%s both old name %d and new name %d are valid", __func__, mName, name);
mName = name;
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId)
+ + "_" + std::to_string(mName)
+ + "_T");
+#endif
}
virtual uint32_t sampleRate() const;
@@ -65,10 +71,12 @@
}
bool isOffloaded() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
- bool isDirect() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ bool isDirect() const override
+ { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
bool isOffloadedOrDirect() const { return (mFlags
& (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
| AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+ bool isStatic() const { return mSharedBuffer.get() != nullptr; }
status_t setParameters(const String8& keyValuePairs);
status_t attachAuxEffect(int EffectId);
@@ -87,6 +95,10 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
+ double bufferLatencyMs() const override {
+ return isStatic() ? 0. : TrackBase::bufferLatencyMs();
+ }
+
// implement volume handling.
media::VolumeShaper::Status applyVolumeShaper(
const sp<media::VolumeShaper::Configuration>& configuration,
@@ -140,7 +152,7 @@
bool isResumePending();
void resumeAck();
void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
- const ExtendedTimestamp &timeStamp);
+ uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
@@ -233,7 +245,7 @@
AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
- bool write(void* data, uint32_t frames);
+ ssize_t write(void* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
@@ -241,6 +253,18 @@
void copyMetadataTo(MetadataInserter& backInserter) const override;
/** Set the metadatas of the upstream tracks. Thread safe. */
void setMetadatas(const SourceMetadatas& metadatas);
+ /** returns client timestamp to the upstream duplicating thread. */
+ ExtendedTimestamp getClientProxyTimestamp() const {
+ // server - kernel difference is not true latency when drained
+ // i.e. mServerProxy->isDrained().
+ ExtendedTimestamp timestamp;
+ (void) mClientProxy->getTimestamp(&timestamp);
+ // On success, the timestamp LOCATION_SERVER and LOCATION_KERNEL
+ // entries will be properly filled. If getTimestamp()
+ // is unsuccessful, then a default initialized timestamp
+ // (with mTimeNs[] filled with -1's) is returned.
+ return timestamp;
+ }
private:
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
@@ -257,6 +281,7 @@
bool mActive;
DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
sp<AudioTrackClientProxy> mClientProxy;
+
/** Attributes of the source tracks.
*
* This member must be accessed with mTrackMetadatasMutex taken.
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index fc2dbbb..b0c9fda 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -51,7 +51,7 @@
bool setOverflow() { bool tmp = mOverflow; mOverflow = true;
return tmp; }
- static void appendDumpHeader(String8& result);
+ void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
void handleSyncStartEvent(const sp<SyncEvent>& event);
@@ -63,6 +63,8 @@
const ExtendedTimestamp &timestamp);
virtual bool isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
+ bool isDirect() const override
+ { return (mFlags & AUDIO_INPUT_FLAG_DIRECT) != 0; }
void setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
bool isSilenced() const { return mSilenced; }
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
deleted file mode 100644
index f45ada1..0000000
--- a/services/audioflinger/ServiceUtilities.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <unistd.h>
-
-#include <binder/PermissionController.h>
-
-namespace android {
-
-extern pid_t getpid_cached;
-bool isTrustedCallingUid(uid_t uid);
-bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
-void finishRecording(const String16& opPackageName, uid_t uid);
-bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
-bool captureHotwordAllowed(pid_t pid, uid_t uid);
-bool settingsAllowed();
-bool modifyAudioRoutingAllowed();
-bool dumpAllowed();
-bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
-}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index dcad866..e3b83f9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -62,8 +62,8 @@
#include "AudioFlinger.h"
#include "FastMixer.h"
#include "FastCapture.h"
-#include "ServiceUtilities.h"
-#include "mediautils/SchedulingPolicyService.h"
+#include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
@@ -200,7 +200,7 @@
// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
-static const size_t kRecordThreadReadOnlyHeapSize = 0x4000;
+static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
// ----------------------------------------------------------------------------
@@ -769,6 +769,8 @@
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
} else {
if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -783,6 +785,12 @@
if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
+ if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
+ if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
+ if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, " );
+ if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, " );
if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
@@ -845,6 +853,15 @@
dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
+ // Dump timestamp statistics for the Thread types that support it.
+ if (mType == RECORD
+ || mType == MIXER
+ || mType == DUPLICATING
+ || mType == DIRECT
+ || mType == OFFLOAD) {
+ dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
+ }
+
if (locked) {
mLock.unlock();
}
@@ -1519,7 +1536,7 @@
}
}
-void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::ThreadBase::toAudioPortConfig(struct audio_port_config *config)
{
config->type = AUDIO_PORT_TYPE_MIX;
config->ext.mix.handle = mId;
@@ -1571,6 +1588,9 @@
--mBatteryCounter[track->uid()].second;
// mLatestActiveTrack is not cleared even if is the same as track.
mHasChanged = true;
+#ifdef TEE_SINK
+ track->dumpTee(-1 /* fd */, "_REMOVE");
+#endif
return index;
}
@@ -1773,7 +1793,7 @@
if (numtracks) {
dprintf(fd, " of which %zu are active\n", numactive);
result.append(prefix);
- Track::appendDumpHeader(result);
+ mTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks; ++i) {
sp<Track> track = mTracks[i];
if (track != 0) {
@@ -1793,7 +1813,7 @@
result.append(" The following tracks are in the active list but"
" not in the track list\n");
result.append(prefix);
- Track::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
sp<Track> track = mActiveTracks[i];
if (mTracks.indexOf(track) < 0) {
@@ -1880,11 +1900,17 @@
status_t lStatus;
audio_output_flags_t outputFlags = mOutput->flags;
audio_output_flags_t requestedFlags = *flags;
+ uint32_t sampleRate;
+
+ if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
- uint32_t sampleRate = *pSampleRate;
+ sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast mixer is present
if (hasFastMixer()) {
@@ -2298,15 +2324,13 @@
if (track->isExternalTrack()) {
TrackBase::track_state state = track->mState;
mLock.unlock();
- status = AudioSystem::startOutput(mId, track->streamType(),
- track->sessionId());
+ status = AudioSystem::startOutput(track->portId());
mLock.lock();
// abort track was stopped/paused while we released the lock
if (state != track->mState) {
if (status == NO_ERROR) {
mLock.unlock();
- AudioSystem::stopOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::stopOutput(track->portId());
mLock.lock();
}
return INVALID_OPERATION;
@@ -2457,6 +2481,11 @@
Mutex::Autolock _l(mLock);
// reject out of sequence requests
if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
+ // Register discontinuity when HW drain is completed because that can cause
+ // the timestamp frame position to reset to 0 for direct and offload threads.
+ // (Out of sequence requests are ignored, since the discontinuity would be handled
+ // elsewhere, e.g. in flush).
+ mTimestampVerifier.discontinuity();
mDrainSequence &= ~1;
mWaitWorkCV.signal();
}
@@ -2781,15 +2810,13 @@
for (size_t i = 0 ; i < count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
if (track->isExternalTrack()) {
- AudioSystem::stopOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::stopOutput(track->portId());
#ifdef ADD_BATTERY_DATA
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
#endif
if (track->isTerminated()) {
- AudioSystem::releaseOutput(mId, track->streamType(),
- track->sessionId());
+ AudioSystem::releaseOutput(track->portId());
}
}
}
@@ -2845,6 +2872,9 @@
ATRACE_END();
if (framesWritten > 0) {
bytesWritten = framesWritten * mFrameSize;
+#ifdef TEE_SINK
+ mTee.write((char *)mSinkBuffer + offset, framesWritten);
+#endif
} else {
bytesWritten = framesWritten;
}
@@ -3162,6 +3192,15 @@
checkSilentMode_l();
+ // DIRECT and OFFLOAD threads should reset frame count to zero on stop/flush
+ // TODO: add confirmation checks:
+ // 1) DIRECT threads and linear PCM format really resets to 0?
+ // 2) Is frame count really valid if not linear pcm?
+ // 3) Are all 64 bits of position returned, not just lowest 32 bits?
+ if (mType == OFFLOAD || mType == DIRECT) {
+ mTimestampVerifier.setDiscontinuityMode(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
+ }
+
while (!exitPending())
{
// Log merge requests are performed during AudioFlinger binder transactions, but
@@ -3185,16 +3224,24 @@
logString = NULL;
}
+ // Collect timestamp statistics for the Playback Thread types that support it.
+ if (mType == MIXER
+ || mType == DUPLICATING
+ || mType == DIRECT
+ || mType == OFFLOAD) { // no indentation
// Gather the framesReleased counters for all active tracks,
// and associate with the sink frames written out. We need
// this to convert the sink timestamp to the track timestamp.
bool kernelLocationUpdate = false;
- if (mNormalSink != 0) {
- // Note: The DuplicatingThread may not have a mNormalSink.
+ ExtendedTimestamp timestamp; // use private copy to fetch
+ if (mStandby) {
+ mTimestampVerifier.discontinuity();
+ } else if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
+ mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ mSampleRate);
// We always fetch the timestamp here because often the downstream
// sink will block while writing.
- ExtendedTimestamp timestamp; // use private copy to fetch
- (void) mNormalSink->getTimestamp(timestamp);
// We keep track of the last valid kernel position in case we are in underrun
// and the normal mixer period is the same as the fast mixer period, or there
@@ -3223,7 +3270,10 @@
+ mSuspendedFrames; // add frames discarded when suspended
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ } else {
+ mTimestampVerifier.error();
}
+
// mFramesWritten for non-offloaded tracks are contiguous
// even after standby() is called. This is useful for the track frame
// to sink frame mapping.
@@ -3251,10 +3301,12 @@
t->updateTrackFrameInfo(
t->mAudioTrackServerProxy->framesReleased(),
mFramesWritten,
+ mSampleRate,
mTimestamp);
}
}
}
+ } // if (mType ... ) { // no indentation
#if 0
// logFormat example
if (z % 100 == 0) {
@@ -3772,12 +3824,16 @@
destroyTrack_l(track);
}
-void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::PlaybackThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
config->role = AUDIO_PORT_ROLE_SOURCE;
config->ext.mix.hw_module = mOutput->audioHwDev->handle();
config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -3858,9 +3914,7 @@
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
-#ifdef TEE_SINK
- NBAIO_Format origformat = format;
-#endif
+
// adjust format to match that of the Fast Mixer
ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
@@ -3872,7 +3926,7 @@
MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
const NBAIO_Format offers[1] = {format};
size_t numCounterOffers = 0;
-#if !LOG_NDEBUG || defined(TEE_SINK)
+#if !LOG_NDEBUG
ssize_t index =
#else
(void)
@@ -3883,25 +3937,8 @@
(monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
mPipeSink = monoPipe;
-#ifdef TEE_SINK
- if (mTeeSinkOutputEnabled) {
- // create a Pipe to archive a copy of FastMixer's output for dumpsys
- Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, origformat);
- const NBAIO_Format offers2[1] = {origformat};
- numCounterOffers = 0;
- index = teeSink->negotiate(offers2, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSink = teeSink;
- PipeReader *teeSource = new PipeReader(*teeSink);
- numCounterOffers = 0;
- index = teeSource->negotiate(offers2, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSource = teeSource;
- }
-#endif
-
// create fast mixer and configure it initially with just one fast track for our submix
- mFastMixer = new FastMixer();
+ mFastMixer = new FastMixer(mId);
FastMixerStateQueue *sq = mFastMixer->sq();
#ifdef STATE_QUEUE_DUMP
sq->setObserverDump(&mStateQueueObserverDump);
@@ -3927,9 +3964,6 @@
state->mColdFutexAddr = &mFastMixerFutex;
state->mColdGen++;
state->mDumpState = &mFastMixerDumpState;
-#ifdef TEE_SINK
- state->mTeeSink = mTeeSink.get();
-#endif
mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer");
state->mNBLogWriter = mFastMixerNBLogWriter.get();
sq->end();
@@ -3938,7 +3972,7 @@
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
stream()->setHalThreadPriority(kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
@@ -3947,9 +3981,14 @@
mAudioWatchdog->setDump(&mAudioWatchdogDump);
mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
tid = mAudioWatchdog->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
#endif
-
+ } else {
+#ifdef TEE_SINK
+ // Only use the MixerThread tee if there is no FastMixer.
+ mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mId) + "_M");
+#endif
}
switch (kUseFastMixer) {
@@ -4265,6 +4304,37 @@
mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
+ // DeferredOperations handles statistics after setting mixerStatus.
+ class DeferredOperations {
+ public:
+ DeferredOperations(mixer_state *mixerStatus)
+ : mMixerStatus(mixerStatus) { }
+
+ // when leaving scope, tally frames properly.
+ ~DeferredOperations() {
+ // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
+ // because that is when the underrun occurs.
+ // We do not distinguish between FastTracks and NormalTracks here.
+ if (*mMixerStatus == MIXER_TRACKS_READY) {
+ for (const auto &underrun : mUnderrunFrames) {
+ underrun.first->mAudioTrackServerProxy->tallyUnderrunFrames(
+ underrun.second);
+ }
+ }
+ }
+
+ // tallyUnderrunFrames() is called to update the track counters
+ // with the number of underrun frames for a particular mixer period.
+ // We defer tallying until we know the final mixer status.
+ void tallyUnderrunFrames(sp<Track> track, size_t underrunFrames) {
+ mUnderrunFrames.emplace_back(track, underrunFrames);
+ }
+
+ private:
+ const mixer_state * const mMixerStatus;
+ std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
+ } deferredOperations(&mixerStatus); // implicit nested scope for variable capture
+
for (size_t i=0 ; i<count ; i++) {
const sp<Track> t = mActiveTracks[i];
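The DeferredOperations helper introduced above records per-track underrun counts during track preparation but only commits them at scope exit, once the final mixer status is known. A self-contained sketch of that "defer side effects until the outcome is decided" idiom, using simplified stand-ins for Track and the mixer state:

#include <cstdio>
#include <utility>
#include <vector>

struct Track {
    const char* name;
    long underrunFrames = 0;
};

enum MixerState { MIXER_IDLE, MIXER_TRACKS_READY };

class DeferredUnderruns {
public:
    explicit DeferredUnderruns(const MixerState* status) : mStatus(status) {}
    void tally(Track* track, long frames) { mPending.emplace_back(track, frames); }
    ~DeferredUnderruns() {
        if (*mStatus != MIXER_TRACKS_READY) return;  // only count real mixing periods
        for (const auto& p : mPending) p.first->underrunFrames += p.second;
    }
private:
    const MixerState* const mStatus;
    std::vector<std::pair<Track*, long>> mPending;
};

int main() {
    Track t{"track0"};
    MixerState status = MIXER_IDLE;
    {
        DeferredUnderruns deferred(&status);
        deferred.tally(&t, 960);      // observed while preparing the track
        status = MIXER_TRACKS_READY;  // decided later in the same mixer period
    }                                 // tallied here, at scope exit
    printf("%s underruns: %ld\n", t.name, t.underrunFrames);
    return 0;
}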
@@ -4299,13 +4369,14 @@
track->mObservedUnderruns = underruns;
// don't count underruns that occur while stopping or pausing
// or stopped which can occur when flush() is called while active
+ size_t underrunFrames = 0;
if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
recentUnderruns > 0) {
// FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
- track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
- } else {
- track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+ underrunFrames = recentUnderruns * mFrameCount;
}
+ // Immediately account for FastTrack underruns.
+ track->mAudioTrackServerProxy->tallyUnderrunFrames(underrunFrames);
// This is similar to the state machine for normal tracks,
// with a few modifications for fast tracks.
@@ -4720,13 +4791,13 @@
mixerStatus = MIXER_TRACKS_READY;
}
} else {
+ size_t underrunFrames = 0;
if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
ALOGV("track(%p) underrun, framesReady(%zu) < framesDesired(%zd)",
track, framesReady, desiredFrames);
- track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
- } else {
- track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
+ underrunFrames = desiredFrames;
}
+ deferredOperations.tallyUnderrunFrames(track, underrunFrames);
// clear effect chain input buffer if an active track underruns to avoid sending
// previous audio buffer again to effects
@@ -5025,6 +5096,12 @@
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
+ const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs != 0.) {
+ dprintf(fd, " NormalMixer latency ms: %.2lf\n", latencyMs);
+ } else {
+ dprintf(fd, " NormalMixer latency ms: unavail\n");
+ }
if (hasFastMixer()) {
dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
@@ -5056,12 +5133,6 @@
} else {
dprintf(fd, " No FastMixer\n");
}
-
-#ifdef TEE_SINK
- // Write the tee output to a .wav file
- dumpTee(fd, mTeeSource, mId, 'M');
-#endif
-
}
uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -5563,6 +5634,7 @@
mOutput->flush();
mHwPaused = false;
mFlushPending = false;
+ mTimestampVerifier.discontinuity(); // DIRECT and OFFLOADED flush resets frame count.
}
int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -5897,6 +5969,14 @@
track->presentationComplete(framesWritten, audioHALFrames);
track->reset();
tracksToRemove->add(track);
+ // DIRECT and OFFLOADED stop resets frame counts.
+ if (!mUseAsyncWrite) {
+ // If we don't get explicit drain notification we must
+ // register discontinuity regardless of whether this is
+ // the previous (!last) or the upcoming (last) track
+ // to avoid skipping the discontinuity.
+ mTimestampVerifier.discontinuity();
+ }
}
} else {
// No buffers for this track. Give it a few chances to
@@ -6062,7 +6142,22 @@
ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
{
for (size_t i = 0; i < outputTracks.size(); i++) {
- outputTracks[i]->write(mSinkBuffer, writeFrames);
+ const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
+
+ // Consider the first OutputTrack for timestamp and frame counting.
+
+ // The threadLoop() generally assumes writing a full sink buffer size at a time.
+ // Here, we correct for writeFrames of 0 (a stop) or underruns because
+ // we always claim success.
+ if (i == 0) {
+ const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
+ ALOGD_IF(correction != 0 && writeFrames != 0,
+ "%s: writeFrames:%u actualWritten:%zd correction:%zd mFramesWritten:%lld",
+ __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
+ mFramesWritten -= correction;
+ }
+
+ // TODO: Report correction for the other output tracks and show in the dump.
}
mStandby = false;
return (ssize_t)mSinkBufferSize;
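A worked example of the frame-count correction applied above for the first OutputTrack: the thread loop assumes a full sink buffer is written each period, so any shortfall is subtracted from the running total. All values below are illustrative.

#include <cstdio>

int main() {
    const long sinkBufferBytes = 3840;   // assumed sink buffer size
    const long frameSize = 4;            // e.g. 16-bit stereo
    const long sinkBufferFrames = sinkBufferBytes / frameSize;   // 960 frames

    long framesWritten = 10 * sinkBufferFrames;  // running total claimed so far
    const long actualWritten = 720;              // track accepted fewer frames (underrun)

    const long correction = sinkBufferFrames - actualWritten;    // 240 frames short
    framesWritten -= correction;

    printf("correction=%ld framesWritten=%ld\n", correction, framesWritten);
    return 0;
}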
@@ -6227,9 +6322,6 @@
audio_devices_t outDevice,
audio_devices_t inDevice,
bool systemReady
-#ifdef TEE_SINK
- , const sp<NBAIO_Sink>& teeSink
-#endif
) :
ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
mInput(input),
@@ -6237,9 +6329,6 @@
mRsmpInBuffer(NULL),
// mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
mRsmpInRear(0)
-#ifdef TEE_SINK
- , mTeeSink(teeSink)
-#endif
, mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
"RecordThreadRO", MemoryHeapBase::READ_ONLY))
// mFastCapture below
@@ -6354,7 +6443,7 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture, false /*forApp*/);
+ sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
stream()->setHalThreadPriority(kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
@@ -6362,6 +6451,10 @@
mFastTrackAvail = true;
}
+#ifdef TEE_SINK
+ mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(mId) + "_C");
+#endif
failed: ;
// FIXME mNormalSource
@@ -6679,8 +6772,9 @@
// Update server timestamp with kernel stats
if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
int64_t position, time;
- int ret = mInput->stream->getCapturePosition(&position, &time);
- if (ret == NO_ERROR) {
+ if (mStandby) {
+ mTimestampVerifier.discontinuity();
+ } else if (mInput->stream->getCapturePosition(&position, &time) == NO_ERROR) {
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
// Note: In general record buffers should tend to be empty in
@@ -6688,6 +6782,12 @@
//
// Also, it is not advantageous to call get_presentation_position during the read
// as the read obtains a lock, preventing the timestamp call from executing.
+
+ mTimestampVerifier.add(mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
+ mSampleRate);
+ } else {
+ mTimestampVerifier.error();
}
}
// Use this to track timestamp information
@@ -6704,9 +6804,9 @@
}
ALOG_ASSERT(framesRead > 0);
- if (mTeeSink != 0) {
- (void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
- }
+#ifdef TEE_SINK
+ (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
+#endif
// If destination is non-contiguous, we now correct for reading past end of buffer.
{
size_t part1 = mRsmpInFramesP2 - rear;
@@ -6764,9 +6864,33 @@
framesOut = min(framesOut,
destinationFramesPossible(
framesIn, mSampleRate, activeTrack->mSampleRate));
- // process frames from the RecordThread buffer provider to the RecordTrack buffer
- framesOut = activeTrack->mRecordBufferConverter->convert(
- activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
+
+ if (activeTrack->isDirect()) {
+ // No RecordBufferConverter used for compressed formats. Pass
+ // straight from RecordThread buffer to RecordTrack buffer.
+ AudioBufferProvider::Buffer buffer;
+ buffer.frameCount = framesOut;
+ status_t status = activeTrack->mResamplerBufferProvider->getNextBuffer(&buffer);
+ if (status == OK && buffer.frameCount != 0) {
+ ALOGV_IF(buffer.frameCount != framesOut,
+ "%s() read less than expected (%zu vs %zu)",
+ __func__, buffer.frameCount, framesOut);
+ framesOut = buffer.frameCount;
+ memcpy(activeTrack->mSink.raw, buffer.raw, buffer.frameCount * mFrameSize);
+ activeTrack->mResamplerBufferProvider->releaseBuffer(&buffer);
+ } else {
+ framesOut = 0;
+ ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
+ __func__, status, buffer.frameCount);
+ }
+ } else {
+ // process frames from the RecordThread buffer provider to the RecordTrack
+ // buffer
+ framesOut = activeTrack->mRecordBufferConverter->convert(
+ activeTrack->mSink.raw,
+ activeTrack->mResamplerBufferProvider,
+ framesOut);
+ }
if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
overrun = OVERRUN_FALSE;
@@ -6937,6 +7061,12 @@
goto Exit;
}
+ if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
+ ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
@@ -7138,8 +7268,10 @@
// see previously buffered data before it called start(), but with greater risk of overrun.
recordTrack->mResamplerBufferProvider->reset();
- // clear any converter state as new data will be discontinuous
- recordTrack->mRecordBufferConverter->reset();
+ if (!recordTrack->isDirect()) {
+ // clear any converter state as new data will be discontinuous
+ recordTrack->mRecordBufferConverter->reset();
+ }
recordTrack->mState = TrackBase::STARTING_2;
// signal thread to start
mWaitWorkCV.broadcast();
@@ -7304,6 +7436,13 @@
(void)input->stream->dump(fd);
}
+ const double latencyMs = - mTimestamp.getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs != 0.) {
+ dprintf(fd, " NormalRecord latency ms: %.2lf\n", latencyMs);
+ } else {
+ dprintf(fd, " NormalRecord latency ms: unavail\n");
+ }
+
dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
@@ -7327,7 +7466,7 @@
if (numtracks) {
dprintf(fd, " of which %zu are active\n", numactive);
result.append(prefix);
- RecordTrack::appendDumpHeader(result);
+ mTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<RecordTrack> track = mTracks[i];
if (track != 0) {
@@ -7347,7 +7486,7 @@
result.append(" The following tracks are in the active list but"
" not in the track list\n");
result.append(prefix);
- RecordTrack::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
sp<RecordTrack> track = mActiveTracks[i];
if (mTracks.indexOf(track) < 0) {
@@ -7642,10 +7781,15 @@
{
status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
- mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
- LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
mFormat = mHALFormat;
- LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+ mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+ if (audio_is_linear_pcm(mFormat)) {
+ LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
+ mChannelCount, FCC_8);
+ } else {
+ // Can have more than FCC_8 channels in encoded streams.
+ ALOGI("HAL format %#x is not linear pcm", mFormat);
+ }
result = mInput->stream->getFrameSize(&mFrameSize);
LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
result = mInput->stream->getBufferSize(&mBufferSize);
@@ -7855,24 +7999,28 @@
return status;
}
-void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::addPatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
mTracks.add(record);
}
-void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record)
+void AudioFlinger::RecordThread::deletePatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
destroyTrack_l(record);
}
-void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::RecordThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
config->role = AUDIO_PORT_ROLE_SINK;
config->ext.mix.hw_module = mInput->audioHwDev->handle();
config->ext.mix.usecase.source = mAudioSource;
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
}
// ----------------------------------------------------------------------------
@@ -7959,7 +8107,7 @@
}
// This will decrement references and may cause the destruction of this thread.
if (isOutput()) {
- AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ AudioSystem::releaseOutput(mPortId);
} else {
AudioSystem::releaseInput(mPortId);
}
@@ -8073,7 +8221,7 @@
bool silenced = false;
if (isOutput()) {
- ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
+ ret = AudioSystem::startOutput(portId);
} else {
ret = AudioSystem::startInput(portId, &silenced);
}
@@ -8085,7 +8233,7 @@
if (mActiveTracks.size() != 0) {
mLock.unlock();
if (isOutput()) {
- AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ AudioSystem::releaseOutput(portId);
} else {
AudioSystem::releaseInput(portId);
}
@@ -8108,7 +8256,7 @@
// Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
- client.clientUid, client.clientPid, portId);
+ isOutput(), client.clientUid, client.clientPid, portId);
track->setSilenced_l(silenced);
mActiveTracks.add(track);
@@ -8157,8 +8305,8 @@
mLock.unlock();
if (isOutput()) {
- AudioSystem::stopOutput(mId, streamType(), track->sessionId());
- AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+ AudioSystem::stopOutput(track->portId());
+ AudioSystem::releaseOutput(track->portId());
} else {
AudioSystem::stopInput(track->portId());
AudioSystem::releaseInput(track->portId());
@@ -8454,9 +8602,9 @@
return status;
}
-void AudioFlinger::MmapThread::getAudioPortConfig(struct audio_port_config *config)
+void AudioFlinger::MmapThread::toAudioPortConfig(struct audio_port_config *config)
{
- ThreadBase::getAudioPortConfig(config);
+ ThreadBase::toAudioPortConfig(config);
if (isOutput()) {
config->role = AUDIO_PORT_ROLE_SOURCE;
config->ext.mix.hw_module = mAudioHwDev->handle();
@@ -8634,7 +8782,7 @@
const char *prefix = " ";
if (numtracks) {
result.append(prefix);
- MmapTrack::appendDumpHeader(result);
+ mActiveTracks[0]->appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<MmapTrack> track = mActiveTracks[i];
result.append(prefix);
@@ -8840,6 +8988,15 @@
}
}
+void AudioFlinger::MmapPlaybackThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.output = mOutput->flags;
+ }
+}
+
void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
MmapThread::dumpInternals(fd, args);
@@ -8930,4 +9087,13 @@
}
}
+void AudioFlinger::MmapCaptureThread::toAudioPortConfig(struct audio_port_config *config)
+{
+ MmapThread::toAudioPortConfig(config);
+ if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
+ config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ config->flags.input = mInput->flags;
+ }
+}
+
} // namespace android
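The timestamp bookkeeping added to the playback and record loops above follows one pattern per period: register a discontinuity on standby/drain/flush, add a frame/time pair when the HAL position fetch succeeds, and record an error otherwise. The sketch below shows that flow with a simplified, hypothetical verifier; it is not the real audio_utils TimestampVerifier API.

#include <cstdio>
#include <cstdint>

class SimpleTimestampVerifier {
public:
    void discontinuity() { mDiscontinuities++; mHaveLast = false; }
    void error() { mErrors++; }
    void add(int64_t frames, int64_t timeNs) {
        if (mHaveLast && (frames < mLastFrames || timeNs < mLastTimeNs)) {
            mErrors++;  // non-monotonic timestamp counts as an error
        }
        mLastFrames = frames;
        mLastTimeNs = timeNs;
        mHaveLast = true;
        mTimestamps++;
    }
    void dump() const {
        printf("timestamps=%d discontinuities=%d errors=%d\n",
               mTimestamps, mDiscontinuities, mErrors);
    }
private:
    int mTimestamps = 0, mDiscontinuities = 0, mErrors = 0;
    bool mHaveLast = false;
    int64_t mLastFrames = 0, mLastTimeNs = 0;
};

int main() {
    SimpleTimestampVerifier verifier;
    verifier.discontinuity();          // coming out of standby
    verifier.add(960, 20000000);       // kernel position after one period
    verifier.add(1920, 40000000);
    verifier.error();                  // HAL gave no position this period
    verifier.dump();
    return 0;
}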
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 28d4482..0c833f1 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -281,7 +281,7 @@
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle) = 0;
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle) = 0;
- virtual void getAudioPortConfig(struct audio_port_config *config) = 0;
+ virtual void toAudioPortConfig(struct audio_port_config *config) = 0;
// see note at declaration of mStandby, mOutDevice and mInDevice
@@ -434,6 +434,12 @@
virtual void setMasterMono_l(bool mono __unused) { }
virtual bool requireMonoBlend() { return false; }
+ // called within the threadLoop to obtain the timestamp from the HAL.
+ virtual status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp __unused) const {
+ return INVALID_OPERATION;
+ }
+
friend class AudioFlinger; // for mEffectChains
const type_t mType;
@@ -493,10 +499,16 @@
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
ExtendedTimestamp mTimestamp;
+ TimestampVerifier< // For timestamp statistics.
+ int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
+
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it
bool mSignalPending;
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
// ActiveTracks is a sorted vector of track type T representing the
// active tracks of threadLoop() to be considered by the locked prepare portion.
// ActiveTracks should be accessed with the ThreadBase lock held.
@@ -782,7 +794,7 @@
void addPatchTrack(const sp<PatchTrack>& track);
void deletePatchTrack(const sp<PatchTrack>& track);
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
// Return the asynchronous signal wait time.
virtual int64_t computeWaitTimeNs_l() const { return INT64_MAX; }
@@ -1054,11 +1066,6 @@
sp<NBAIO_Sink> mPipeSink;
// The current sink for the normal mixer to write its (sub)mix, mOutputSink or mPipeSink
sp<NBAIO_Sink> mNormalSink;
-#ifdef TEE_SINK
- // For dumpsys
- sp<NBAIO_Sink> mTeeSink;
- sp<NBAIO_Source> mTeeSource;
-#endif
uint32_t mScreenState; // cached copy of gScreenState
// TODO: add comment and adjust size as needed
static const size_t kFastMixerLogSize = 8 * 1024;
@@ -1152,6 +1159,14 @@
return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
}
+ status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp) const override {
+ if (mNormalSink.get() != nullptr) {
+ return mNormalSink->getTimestamp(*timestamp);
+ }
+ return INVALID_OPERATION;
+ }
+
protected:
virtual void setMasterMono_l(bool mono) {
mMasterMono.store(mono);
@@ -1209,6 +1224,23 @@
virtual bool hasFastMixer() const { return false; }
virtual int64_t computeWaitTimeNs_l() const override;
+
+ status_t threadloop_getHalTimestamp_l(ExtendedTimestamp *timestamp) const override {
+ // For DIRECT and OFFLOAD threads, query the output sink directly.
+ if (mOutput != nullptr) {
+ uint64_t uposition64;
+ struct timespec time;
+ if (mOutput->getPresentationPosition(
+ &uposition64, &time) == OK) {
+ timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ = (int64_t)uposition64;
+ timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+ = audio_utils_ns_from_timespec(&time);
+ return NO_ERROR;
+ }
+ }
+ return INVALID_OPERATION;
+ }
};
class OffloadThread : public DirectOutputThread {
@@ -1316,6 +1348,22 @@
SortedVector < sp<OutputTrack> > mOutputTracks;
public:
virtual bool hasFastMixer() const { return false; }
+ status_t threadloop_getHalTimestamp_l(
+ ExtendedTimestamp *timestamp) const override {
+ if (mOutputTracks.size() > 0) {
+ // forward the first OutputTrack's kernel timestamp information.
+ const ExtendedTimestamp trackTimestamp =
+ mOutputTracks[0]->getClientProxyTimestamp();
+ if (trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0) {
+ timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ trackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ trackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ return OK; // discard the server timestamp - it is ignored here.
+ }
+ }
+ return INVALID_OPERATION;
+ }
};
// record thread
@@ -1374,9 +1422,6 @@
audio_devices_t outDevice,
audio_devices_t inDevice,
bool systemReady
-#ifdef TEE_SINK
- , const sp<NBAIO_Sink>& teeSink
-#endif
);
virtual ~RecordThread();
@@ -1437,8 +1482,8 @@
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
- void addPatchRecord(const sp<PatchRecord>& record);
- void deletePatchRecord(const sp<PatchRecord>& record);
+ void addPatchTrack(const sp<PatchRecord>& record);
+ void deletePatchTrack(const sp<PatchRecord>& record);
void readInputParameters_l();
virtual uint32_t getInputFramesLost();
@@ -1459,7 +1504,7 @@
virtual size_t frameCount() const { return mFrameCount; }
bool hasFastCapture() const { return mFastCapture != 0; }
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
audio_session_t sessionId);
@@ -1506,8 +1551,6 @@
int32_t mRsmpInRear; // last filled frame + 1
// For dumpsys
- const sp<NBAIO_Sink> mTeeSink;
-
const sp<MemoryDealer> mReadOnlyHeap;
// one-time initialization, no locks required
@@ -1602,7 +1645,7 @@
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
- virtual void getAudioPortConfig(struct audio_port_config *config);
+ virtual void toAudioPortConfig(struct audio_port_config *config);
virtual sp<StreamHalInterface> stream() const { return mHalStream; }
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
@@ -1686,6 +1729,8 @@
void updateMetadata_l() override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
audio_stream_type_t mStreamType;
@@ -1714,6 +1759,8 @@
void processVolume_l() override;
void setRecordSilenced(uid_t uid, bool silenced) override;
+ virtual void toAudioPortConfig(struct audio_port_config *config);
+
protected:
AudioStreamIn* mInput;
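The threadloop_getHalTimestamp_l() family declared above follows a default-plus-override pattern: MixerThread asks mNormalSink, DirectOutputThread (and OffloadThread) query the HAL presentation position, DuplicatingThread forwards the kernel fields from its first OutputTrack, and every other thread type keeps the INVALID_OPERATION default. The thread loop therefore only needs to branch on the returned status. A hedged sketch of that calling pattern (the real call site is in Threads.cpp and not shown in this hunk):
    // Illustrative only: how the thread loop can consume threadloop_getHalTimestamp_l().
    ExtendedTimestamp halTimestamp;
    if (threadloop_getHalTimestamp_l(&halTimestamp) == NO_ERROR) {
        // LOCATION_KERNEL position/time are valid; feed them to mTimestampVerifier, etc.
    } else {
        // INVALID_OPERATION: this thread type has no HAL timestamp source.
    }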
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index ccfb69f..95da9d7 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -91,6 +91,7 @@
void* buffer() const { return mBuffer; }
size_t bufferSize() const { return mBufferSize; }
virtual bool isFastTrack() const = 0;
+ virtual bool isDirect() const = 0;
bool isOutputTrack() const { return (mType == TYPE_OUTPUT); }
bool isPatchTrack() const { return (mType == TYPE_PATCH); }
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -100,6 +101,92 @@
audio_attributes_t attributes() const { return mAttr; }
+#ifdef TEE_SINK
+ void dumpTee(int fd, const std::string &reason) const {
+ mTee.dump(fd, reason);
+ }
+#endif
+
+ /** returns the buffer contents size converted to time in milliseconds
+ * for PCM Playback or Record streaming tracks. The return value is zero for
+ * PCM static tracks and not defined for non-PCM tracks.
+ *
+ * This may be called without the thread lock.
+ */
+ virtual double bufferLatencyMs() const {
+ return mServerProxy->framesReadySafe() * 1000. / sampleRate();
+ }
+
+ /** returns whether the track supports server latency computation.
+ * This is set in the constructor and constant throughout the track lifetime.
+ */
+
+ bool isServerLatencySupported() const { return mServerLatencySupported; }
+
+ /** computes the server latency for PCM Playback or Record track
+ * to the device sink/source. This is the time for the next frame in the track buffer
+ * written or read from the server thread to the device source or sink.
+ *
+ * This may be called without the thread lock, but latencyMs and fromTrack
+ * may not be synchronized. For example, PatchPanel may not obtain the
+ * thread lock before calling.
+ *
+ * \param latencyMs on success is set to the latency in milliseconds of the
+ * next frame written/read by the server thread to/from the track buffer
+ * from the device source/sink.
+ * \param fromTrack on success is set to true if latency was computed directly
+ * from the track timestamp; otherwise set to false if latency was
+ * estimated from the server timestamp.
+ * fromTrack may be nullptr or omitted if not required.
+ *
+ * \returns OK or INVALID_OPERATION on failure.
+ */
+ status_t getServerLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+ if (!isServerLatencySupported()) {
+ return INVALID_OPERATION;
+ }
+
+ // if no thread lock is acquired, these atomics are not
+ // synchronized with each other, considered a benign race.
+
+ const double serverLatencyMs = mServerLatencyMs.load();
+ if (serverLatencyMs == 0.) {
+ return INVALID_OPERATION;
+ }
+ if (fromTrack != nullptr) {
+ *fromTrack = mServerLatencyFromTrack.load();
+ }
+ *latencyMs = serverLatencyMs;
+ return OK;
+ }
+
+ /** computes the total client latency for PCM Playback or Record tracks
+ * for the next client app access to the device sink/source; i.e. the
+ * server latency plus the buffer latency.
+ *
+ * This may be called without the thread lock, but latencyMs and fromTrack
+ * may not be synchronized. For example, PatchPanel may not obtain the
+ * thread lock before calling.
+ *
+ * \param latencyMs on success is set to the latency in milliseconds of the
+ * next frame written/read by the client app to/from the track buffer
+ * from the device sink/source.
+ * \param fromTrack on success is set to true if latency was computed directly
+ * from the track timestamp; otherwise set to false if latency was
+ * estimated from the server timestamp.
+ * fromTrack may be nullptr or omitted if not required.
+ *
+ * \returns OK or INVALID_OPERATION on failure.
+ */
+ status_t getTrackLatencyMs(double *latencyMs, bool *fromTrack = nullptr) const {
+ double serverLatencyMs;
+ status_t status = getServerLatencyMs(&serverLatencyMs, fromTrack);
+ if (status == OK) {
+ *latencyMs = serverLatencyMs + bufferLatencyMs();
+ }
+ return status;
+ }
+
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
@@ -208,13 +295,18 @@
const bool mIsOut;
sp<ServerProxy> mServerProxy;
const int mId;
- sp<NBAIO_Sink> mTeeSink;
- sp<NBAIO_Source> mTeeSource;
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
bool mTerminated;
track_type mType; // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
audio_io_handle_t mThreadIoHandle; // I/O handle of the thread the track is attached to
audio_port_handle_t mPortId; // unique ID for this track used by audio policy
bool mIsInvalid; // non-resettable latch, set by invalidate()
+
+ bool mServerLatencySupported = false;
+ std::atomic<bool> mServerLatencyFromTrack{}; // latency from track or server timestamp.
+ std::atomic<double> mServerLatencyMs{}; // last latency pushed from server thread.
};
// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
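The latency accessors added above split a track's total latency into a server part (pushed from the thread loop into mServerLatencyMs/mServerLatencyFromTrack) and a buffer part (frames still queued in the track buffer), with getTrackLatencyMs() summing the two. A hedged usage sketch, mirroring how the dump code below consumes it:
    // Illustrative only: querying the combined client-side latency of a track.
    double latencyMs;
    bool fromTrack;
    if (track->getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
        // 't' = derived from the track's own timestamp, 'k' = estimated from the kernel.
        ALOGD("track latency %.2f ms (%c)", latencyMs, fromTrack ? 't' : 'k');
    }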
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index a7c4253..8b9485f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -28,11 +28,11 @@
#include <private/media/AudioTrackShared.h>
#include "AudioFlinger.h"
-#include "ServiceUtilities.h"
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <media/RecordBufferConverter.h>
+#include <mediautils/ServiceUtilities.h>
#include <audio_utils/minifloat.h>
// ----------------------------------------------------------------------------
@@ -102,7 +102,7 @@
mIsInvalid(false)
{
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+ if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
clientUid = callingUid;
@@ -210,22 +210,7 @@
mBufferSize = bufferSize;
#ifdef TEE_SINK
- if (mTeeSinkTrackEnabled) {
- NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
- if (Format_isValid(pipeFormat)) {
- Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {pipeFormat};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mTeeSink = pipe;
- mTeeSource = pipeReader;
- }
- }
+ mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
#endif
}
@@ -244,9 +229,6 @@
AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
-#ifdef TEE_SINK
- dumpTee(-1, mTeeSource, mId, 'T');
-#endif
// delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
mServerProxy.clear();
if (mCblk != NULL) {
@@ -274,9 +256,7 @@
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
- if (mTeeSink != 0) {
- (void) mTeeSink->write(buffer->raw, buffer->frameCount);
- }
+ mTee.write(buffer->raw, buffer->frameCount);
#endif
ServerProxy::Buffer buf;
@@ -454,6 +434,14 @@
thread->mFastTrackAvailMask &= ~(1 << i);
}
mName = TRACK_NAME_PENDING;
+
+ mServerLatencySupported = thread->type() == ThreadBase::MIXER
+ || thread->type() == ThreadBase::DUPLICATING;
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId) +
+ + "_PEND_T");
+#endif
}
AudioFlinger::PlaybackThread::Track::~Track()
@@ -498,18 +486,20 @@
wasActive = playbackThread->destroyTrack_l(this);
}
if (isExternalTrack() && !wasActive) {
- AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
+ AudioSystem::releaseOutput(mPortId);
}
}
}
-/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
- result.append("T Name Active Client Session S Flags "
- " Format Chn mask SRate "
- "ST L dB R dB VS dB "
- " Server FrmCnt FrmRdy F Underruns Flushed "
- "Main Buf Aux Buf\n");
+ result.appendFormat("T Name Active Client Session S Flags "
+ " Format Chn mask SRate "
+ "ST Usg CT "
+ " G db L dB R dB VS dB "
+ " Server FrmCnt FrmRdy F Underruns Flushed"
+ "%s\n",
+ isServerLatencySupported() ? " Latency" : "");
}
void AudioFlinger::PlaybackThread::Track::appendDump(String8& result, bool active)
@@ -518,7 +508,7 @@
switch (mType) {
case TYPE_DEFAULT:
case TYPE_OUTPUT:
- if (mSharedBuffer.get() != nullptr) {
+ if (isStatic()) {
trackType = 'S'; // static
} else {
trackType = ' '; // normal
@@ -594,21 +584,25 @@
? 'e' /* error */ : ' ' /* identical */;
result.appendFormat("%7s %6u %7u %2s 0x%03X "
- "%08X %08X %6u "
- "%2u %5.2g %5.2g %5.2g%c "
- "%08X %6zu%c %6zu %c %9u%c %7u "
- "%08zX %08zX\n",
+ "%08X %08X %6u "
+ "%2u %3x %2x "
+ "%5.2g %5.2g %5.2g %5.2g%c "
+ "%08X %6zu%c %6zu %c %9u%c %7u",
active ? "yes" : "no",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mSessionId,
getTrackStateString(),
mCblk->mFlags,
mFormat,
mChannelMask,
- mAudioTrackServerProxy->getSampleRate(),
+ sampleRate(),
mStreamType,
+ mAttr.usage,
+ mAttr.content_type,
+
+ 20.0 * log10(mFinalVolume),
20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
@@ -621,11 +615,21 @@
fillingStatus,
mAudioTrackServerProxy->getUnderrunFrames(),
nowInUnderrun,
- (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
-
- (size_t)mMainBuffer, // use %zX as %p appends 0x
- (size_t)mAuxBuffer // use %zX as %p appends 0x
+ (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000
);
+
+ if (isServerLatencySupported()) {
+ double latencyMs;
+ bool fromTrack;
+ if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+ // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+ // or 'k' if estimated from kernel because track frames haven't been presented yet.
+ result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+ } else {
+ result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+ }
+ }
+ result.append("\n");
}
uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
@@ -682,6 +686,13 @@
mAudioTrackServerProxy->setTimestamp(timestamp);
// We do not set drained here, as FastTrack timestamp may not go to very last frame.
+
+ // Compute latency.
+ // TODO: Consider whether the server latency may be passed in by FastMixer
+ // as a constant for all active FastTracks.
+ const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
+ mServerLatencyFromTrack.store(true);
+ mServerLatencyMs.store(latencyMs);
}
// Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -985,7 +996,7 @@
// Signal thread to fetch new volume.
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
+ Mutex::Autolock _l(thread->mLock);
thread->broadcast_l();
}
}
@@ -1246,7 +1257,7 @@
//To be called with thread lock held
void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
int64_t trackFramesReleased, int64_t sinkFramesWritten,
- const ExtendedTimestamp &timeStamp) {
+ uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
//update frame map
mFrameMap.push(trackFramesReleased, sinkFramesWritten);
@@ -1255,6 +1266,7 @@
// Our timestamps are only updated when the track is on the Thread active list.
// We need to ensure that tracks are not removed before full drain.
ExtendedTimestamp local = timeStamp;
+ bool drained = true; // default: assume drained if no server info is found
bool checked = false;
for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
@@ -1263,18 +1275,25 @@
local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
// check drain state from the latest stage in the pipeline.
if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
- mAudioTrackServerProxy->setDrained(
- local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
+ drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
checked = true;
}
}
}
- if (!checked) { // no server info, assume drained.
- mAudioTrackServerProxy->setDrained(true);
- }
+
+ mAudioTrackServerProxy->setDrained(drained);
// Set correction for flushed frames that are not accounted for in released.
local.mFlushed = mAudioTrackServerProxy->framesFlushed();
mServerProxy->setTimestamp(local);
+
+ // Compute latency info.
+ const bool useTrackTimestamp = !drained;
+ const double latencyMs = useTrackTimestamp
+ ? local.getOutputServerLatencyMs(sampleRate())
+ : timeStamp.getOutputServerLatencyMs(halSampleRate);
+
+ mServerLatencyFromTrack.store(useTrackTimestamp);
+ mServerLatencyMs.store(latencyMs);
}
// ----------------------------------------------------------------------------
@@ -1342,7 +1361,7 @@
mActive = false;
}
-bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
+ssize_t AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
Buffer *pInBuffer;
Buffer inBuffer;
@@ -1431,9 +1450,12 @@
mBufferQueue.add(pInBuffer);
ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
+ // audio data is consumed (stored locally); set frameCount to 0.
+ inBuffer.frameCount = 0;
} else {
ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
mThread.unsafe_get(), this);
+ // TODO: return error for this.
}
}
}
@@ -1444,7 +1466,7 @@
stop();
}
- return outputBufferFull;
+ return frames - inBuffer.frameCount; // number of frames consumed.
}
void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
@@ -1509,7 +1531,7 @@
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
- AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+ AUDIO_SESSION_NONE, AID_AUDIOSERVER, flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
@@ -1528,7 +1550,7 @@
}
status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
- audio_session_t triggerSession)
+ audio_session_t triggerSession)
{
status_t status = Track::start(event, triggerSession);
if (status != NO_ERROR) {
@@ -1672,18 +1694,20 @@
return;
}
- mRecordBufferConverter = new RecordBufferConverter(
- thread->mChannelMask, thread->mFormat, thread->mSampleRate,
- channelMask, format, sampleRate);
- // Check if the RecordBufferConverter construction was successful.
- // If not, don't continue with construction.
- //
- // NOTE: It would be extremely rare that the record track cannot be created
- // for the current device, but a pending or future device change would make
- // the record track configuration valid.
- if (mRecordBufferConverter->initCheck() != NO_ERROR) {
- ALOGE("RecordTrack unable to create record buffer converter");
- return;
+ if (!isDirect()) {
+ mRecordBufferConverter = new RecordBufferConverter(
+ thread->mChannelMask, thread->mFormat, thread->mSampleRate,
+ channelMask, format, sampleRate);
+ // Check if the RecordBufferConverter construction was successful.
+ // If not, don't continue with construction.
+ //
+ // NOTE: It would be extremely rare that the record track cannot be created
+ // for the current device, but a pending or future device change would make
+ // the record track configuration valid.
+ if (mRecordBufferConverter->initCheck() != NO_ERROR) {
+ ALOGE("RecordTrack unable to create record buffer converter");
+ return;
+ }
}
mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
@@ -1694,7 +1718,15 @@
if (flags & AUDIO_INPUT_FLAG_FAST) {
ALOG_ASSERT(thread->mFastTrackAvail);
thread->mFastTrackAvail = false;
+ } else {
+ // TODO: only Normal Record has timestamps (Fast Record does not).
+ mServerLatencySupported = true;
}
+#ifdef TEE_SINK
+ mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ + "_" + std::to_string(mId)
+ + "_R");
+#endif
}
AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1783,19 +1815,22 @@
}
-/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
+void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt Sil\n");
+ result.appendFormat("Active Client Session S Flags "
+ " Format Chn mask SRate Source "
+ " Server FrmCnt FrmRdy Sil%s\n",
+ isServerLatencySupported() ? " Latency" : "");
}
void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
- "%08X %08X %6u "
- "%08X %6zu %3c\n",
+ "%08X %08X %6u %6X "
+ "%08X %6zu %6zu %3c",
isFastTrack() ? 'F' : ' ',
active ? "yes" : "no",
- (mClient == 0) ? getpid_cached : mClient->pid(),
+ (mClient == 0) ? getpid() : mClient->pid(),
mSessionId,
getTrackStateString(),
mCblk->mFlags,
@@ -1803,11 +1838,25 @@
mFormat,
mChannelMask,
mSampleRate,
+ mAttr.source,
mCblk->mServer,
mFrameCount,
+ mServerProxy->framesReadySafe(),
isSilenced() ? 's' : 'n'
);
+ if (isServerLatencySupported()) {
+ double latencyMs;
+ bool fromTrack;
+ if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
+ // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
+ // or 'k' if estimated from kernel (usually for debugging).
+ result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
+ } else {
+ result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
+ }
+ }
+ result.append("\n");
}
void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
@@ -1850,6 +1899,15 @@
}
}
mServerProxy->setTimestamp(local);
+
+ // Compute latency info.
+ const bool useTrackTimestamp = true; // use track unless debugging.
+ const double latencyMs = - (useTrackTimestamp
+ ? local.getOutputServerLatencyMs(sampleRate())
+ : timestamp.getOutputServerLatencyMs(halSampleRate));
+
+ mServerLatencyFromTrack.store(useTrackTimestamp);
+ mServerLatencyMs.store(latencyMs);
}
status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
@@ -1875,7 +1933,8 @@
: RecordTrack(recordThread, NULL,
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
- buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
+ buffer, bufferSize, AUDIO_SESSION_NONE, AID_AUDIOSERVER,
+ flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
@@ -1940,13 +1999,14 @@
audio_format_t format,
audio_channel_mask_t channelMask,
audio_session_t sessionId,
+ bool isOut,
uid_t uid,
pid_t pid,
audio_port_handle_t portId)
: TrackBase(thread, NULL, attr, sampleRate, format,
channelMask, (size_t)0 /* frameCount */,
nullptr /* buffer */, (size_t)0 /* bufferSize */,
- sessionId, uid, false /* isOut */,
+ sessionId, uid, isOut,
ALLOC_NONE,
TYPE_DEFAULT, portId),
mPid(pid), mSilenced(false), mSilencedNotified(false)
@@ -1963,7 +2023,7 @@
}
status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
- audio_session_t triggerSession __unused)
+ audio_session_t triggerSession __unused)
{
return NO_ERROR;
}
@@ -1994,19 +2054,27 @@
{
}
-/*static*/ void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
+void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
{
- result.append("Client Session Format Chn mask SRate\n");
+ result.appendFormat("Client Session Format Chn mask SRate Flags %s\n",
+ isOut() ? "Usg CT": "Source");
}
void AudioFlinger::MmapThread::MmapTrack::appendDump(String8& result, bool active __unused)
{
- result.appendFormat("%6u %7u %08X %08X %6u\n",
+ result.appendFormat("%6u %7u %08X %08X %6u 0x%03X ",
mPid,
mSessionId,
mFormat,
mChannelMask,
- mSampleRate);
+ mSampleRate,
+ mAttr.flags);
+ if (isOut()) {
+ result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
+ } else {
+ result.appendFormat("%6x", mAttr.source);
+ }
+ result.append("\n");
}
} // namespace android
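Among the Tracks.cpp changes above, OutputTrack::write() now returns an ssize_t frame count instead of a buffer-full flag, so a duplicating caller can account for what was actually consumed (frames parked in overflow buffers count as consumed). A hedged caller sketch; the variable names are hypothetical and the real caller, DuplicatingThread, is not part of this hunk:
    // Illustrative only: consuming the new ssize_t return value of OutputTrack::write().
    const ssize_t consumed = outputTrack->write(mixBuffer, framesToWrite);
    if (consumed >= 0 && (uint32_t)consumed < framesToWrite) {
        // some frames could not be queued (no overflow buffers left); handle the shortfall
    }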
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index d29cae1..b75e957 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -13,7 +13,6 @@
$(call include-path-for, audio-utils) \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_SHARED_LIBRARIES := \
libcutils \
@@ -22,10 +21,10 @@
libbinder \
libaudioclient \
libhardware_legacy \
- libserviceutility \
libaudiopolicymanager \
libmedia_helper \
libmediametrics \
+ libmediautils \
libeffectsconfig
LOCAL_STATIC_LIBRARIES := \
@@ -74,7 +73,6 @@
LOCAL_C_INCLUDES += \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4812b1f..fe49483 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -324,11 +324,6 @@
// function enabling the audio policy manager to receive proprietary information directly from the audio hardware interface.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
- virtual status_t stopTone() = 0;
-
// set down link audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index e69e687..b3611c4 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -35,8 +35,7 @@
$(LOCAL_PATH)/include \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy \
- frameworks/av/services/audiopolicy/utilities \
- system/media/audio_utils/include \
+ $(call include-path-for, audio-utils) \
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
similarity index 67%
rename from services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index e0037fc..9f3fc0c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -19,26 +19,27 @@
namespace android {
/**
- * Interface for input descriptors to implement so dependent audio sessions can query information
- * about their context
+ * Interface for I/O descriptors to implement so information about their context
+ * can be queried and updated.
*/
-class AudioSessionInfoProvider
+class AudioIODescriptorInterface
{
public:
- virtual ~AudioSessionInfoProvider() {};
+ virtual ~AudioIODescriptorInterface() {};
virtual audio_config_base_t getConfig() const = 0;
virtual audio_patch_handle_t getPatchHandle() const = 0;
+ virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
};
-class AudioSessionInfoUpdateListener
+class AudioIODescriptorUpdateListener
{
public:
- virtual ~AudioSessionInfoUpdateListener() {};
+ virtual ~AudioIODescriptorUpdateListener() {};
- virtual void onSessionInfoUpdate() const = 0;;
+ virtual void onIODescriptorUpdate() const = 0;
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b25d6d4..85f3b86 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -16,9 +16,9 @@
#pragma once
+#include "AudioIODescriptorInterface.h"
#include "AudioPort.h"
#include "AudioSession.h"
-#include "AudioSessionInfoProvider.h"
#include <utils/Errors.h>
#include <system/audio.h>
#include <utils/SortedVector.h>
@@ -31,7 +31,7 @@
// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
// and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
+class AudioInputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
{
public:
explicit AudioInputDescriptor(const sp<IOProfile>& profile,
@@ -67,11 +67,10 @@
size_t getAudioSessionCount(bool activeOnly) const;
audio_source_t getHighestPrioritySource(bool activeOnly) const;
- // implementation of AudioSessionInfoProvider
- virtual audio_config_base_t getConfig() const;
- virtual audio_patch_handle_t getPatchHandle() const;
-
- void setPatchHandle(audio_patch_handle_t handle);
+ // implementation of AudioIODescriptorInterface
+ audio_config_base_t getConfig() const override;
+ audio_patch_handle_t getPatchHandle() const override;
+ void setPatchHandle(audio_patch_handle_t handle) override;
status_t open(const audio_config_t *config,
audio_devices_t device,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 5e5d38b..57d1cfa 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -24,6 +24,7 @@
#include <utils/Timers.h>
#include <utils/KeyedVector.h>
#include <system/audio.h>
+#include "AudioIODescriptorInterface.h"
#include "AudioSourceDescriptor.h"
namespace android {
@@ -35,7 +36,7 @@
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
-class AudioOutputDescriptor: public AudioPortConfig
+class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
{
public:
AudioOutputDescriptor(const sp<AudioPort>& port,
@@ -73,8 +74,10 @@
audio_module_handle_t getModuleHandle() const;
- audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
- void setPatchHandle(audio_patch_handle_t handle) { mPatchHandle = handle; };
+ // implementation of AudioIODescriptorInterface
+ audio_config_base_t getConfig() const override;
+ audio_patch_handle_t getPatchHandle() const override;
+ void setPatchHandle(audio_patch_handle_t handle) override;
sp<AudioPort> mPort;
audio_devices_t mDevice; // current device this output is routed to
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 43f6ed6..f861b95 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -38,16 +38,24 @@
AudioPolicyConfig(HwModuleCollection &hwModules,
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
- sp<DeviceDescriptor> &defaultOutputDevices,
+ sp<DeviceDescriptor> &defaultOutputDevice,
VolumeCurvesCollection *volumes = nullptr)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
- mDefaultOutputDevices(defaultOutputDevices),
+ mDefaultOutputDevice(defaultOutputDevice),
mVolumeCurves(volumes),
mIsSpeakerDrcEnabled(false)
{}
+ const std::string& getSource() const {
+ return mSource;
+ }
+
+ void setSource(const std::string& file) {
+ mSource = file;
+ }
+
void setVolumes(const VolumeCurvesCollection &volumes)
{
if (mVolumeCurves != nullptr) {
@@ -100,46 +108,52 @@
void setDefaultOutputDevice(const sp<DeviceDescriptor> &defaultDevice)
{
- mDefaultOutputDevices = defaultDevice;
+ mDefaultOutputDevice = defaultDevice;
}
- const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevices; }
+ const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevice; }
void setDefault(void)
{
- mDefaultOutputDevices = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
- sp<HwModule> module;
+ mSource = "AudioPolicyConfig::setDefault";
+ mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+ mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
- mAvailableOutputDevices.add(mDefaultOutputDevices);
+ defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+ sp<AudioProfile> micProfile = new AudioProfile(
+ AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000);
+ defaultInputDevice->addAudioProfile(micProfile);
+ mAvailableOutputDevices.add(mDefaultOutputDevice);
mAvailableInputDevices.add(defaultInputDevice);
- module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+ sp<HwModule> module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY, 2 /*halVersionMajor*/);
+ mHwModules.add(module);
+ mDefaultOutputDevice->attach(module);
+ defaultInputDevice->attach(module);
sp<OutputProfile> outProfile;
outProfile = new OutputProfile(String8("primary"));
outProfile->attach(module);
outProfile->addAudioProfile(
new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
- outProfile->addSupportedDevice(mDefaultOutputDevices);
+ outProfile->addSupportedDevice(mDefaultOutputDevice);
outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
module->addOutputProfile(outProfile);
sp<InputProfile> inProfile;
inProfile = new InputProfile(String8("primary"));
inProfile->attach(module);
- inProfile->addAudioProfile(
- new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000));
+ inProfile->addAudioProfile(micProfile);
inProfile->addSupportedDevice(defaultInputDevice);
module->addInputProfile(inProfile);
-
- mHwModules.add(module);
}
private:
+ std::string mSource;
HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
- sp<DeviceDescriptor> &mDefaultOutputDevices;
+ sp<DeviceDescriptor> &mDefaultOutputDevice;
VolumeCurvesCollection *mVolumeCurves;
// TODO: remove when legacy conf file is removed. true on devices that use DRC on the
// DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 09a86dd..bd7517f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -153,9 +153,6 @@
class AudioPortConfig : public virtual RefBase
{
public:
- AudioPortConfig();
- virtual ~AudioPortConfig() {}
-
status_t applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -165,10 +162,11 @@
return (other != 0) &&
(other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
}
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- struct audio_gain_config mGain;
+ unsigned int mSamplingRate = 0u;
+ audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ struct audio_gain_config mGain = { .index = -1 };
+ union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 8741c66..4226ff2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -36,6 +36,8 @@
class AudioProfile : public virtual RefBase
{
public:
+ static sp<AudioProfile> createFullDynamic();
+
AudioProfile(audio_format_t format,
audio_channel_mask_t channelMasks,
uint32_t samplingRate) :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index dd5247d..53e6ec9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -23,13 +23,13 @@
#include <utils/KeyedVector.h>
#include <media/AudioPolicy.h>
#include <media/IAudioPolicyServiceClient.h>
-#include "AudioSessionInfoProvider.h"
+#include "AudioIODescriptorInterface.h"
namespace android {
class AudioPolicyClientInterface;
-class AudioSession : public RefBase, public AudioSessionInfoUpdateListener
+class AudioSession : public RefBase, public AudioIODescriptorUpdateListener
{
public:
AudioSession(audio_session_t session,
@@ -63,9 +63,9 @@
uint32_t changeOpenCount(int delta);
uint32_t changeActiveCount(int delta);
- void setInfoProvider(AudioSessionInfoProvider *provider);
- // implementation of AudioSessionInfoUpdateListener
- virtual void onSessionInfoUpdate() const;
+ void setInfoProvider(AudioIODescriptorInterface *provider);
+ // implementation of AudioIODescriptorUpdateListener
+ virtual void onIODescriptorUpdate() const;
private:
record_client_info_t mRecordClientInfo;
@@ -77,17 +77,17 @@
uint32_t mActiveCount;
AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
AudioPolicyClientInterface* mClientInterface;
- const AudioSessionInfoProvider* mInfoProvider;
+ const AudioIODescriptorInterface* mInfoProvider;
};
class AudioSessionCollection :
public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
- public AudioSessionInfoUpdateListener
+ public AudioIODescriptorUpdateListener
{
public:
status_t addSession(audio_session_t session,
const sp<AudioSession>& audioSession,
- AudioSessionInfoProvider *provider);
+ AudioIODescriptorInterface *provider);
status_t removeSession(audio_session_t session);
@@ -99,8 +99,8 @@
bool isSourceActive(audio_source_t source) const;
audio_source_t getHighestPrioritySource(bool activeOnly) const;
- // implementation of AudioSessionInfoUpdateListener
- virtual void onSessionInfoUpdate() const;
+ // implementation of AudioIODescriptorUpdateListener
+ virtual void onIODescriptorUpdate() const;
status_t dump(int fd, int spaces) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 92a4c3e..2325e4f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -76,11 +76,12 @@
audio_devices_t types() const { return mDeviceTypes; }
- sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8& address) const;
- DeviceVector getDevicesFromType(audio_devices_t types) const;
+ // If 'address' is empty, a device with a non-empty address may be returned
+ // if there is no device with the specified 'type' and empty address.
+ sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address) const;
+ DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
- DeviceVector getDevicesFromTypeAddr(audio_devices_t type, const String8& address) const;
audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index cb9f49e..05cfc31 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -107,7 +107,7 @@
sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t device,
const char *device_address,
const char *device_name,
- bool matchAdress = true) const;
+ bool matchAddress = true) const;
status_t dump(int fd) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 92332fb..f0144db 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -164,7 +164,7 @@
status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
const sp<AudioSession>& audioSession) {
- return mSessions.addSession(session, audioSession, /*AudioSessionInfoProvider*/this);
+ return mSessions.addSession(session, audioSession, /*AudioIODescriptorInterface*/this);
}
status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
@@ -179,7 +179,7 @@
void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
{
mPatchHandle = handle;
- mSessions.onSessionInfoUpdate();
+ mSessions.onIODescriptorUpdate();
}
audio_config_base_t AudioInputDescriptor::getConfig() const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 294a2a6..3c69de5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -55,11 +55,28 @@
}
}
+audio_config_base_t AudioOutputDescriptor::getConfig() const
+{
+ const audio_config_base_t config = { .sample_rate = mSamplingRate, .channel_mask = mChannelMask,
+ .format = mFormat };
+ return config;
+}
+
audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
{
return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
}
+audio_patch_handle_t AudioOutputDescriptor::getPatchHandle() const
+{
+ return mPatchHandle;
+}
+
+void AudioOutputDescriptor::setPatchHandle(audio_patch_handle_t handle)
+{
+ mPatchHandle = handle;
+}
+
audio_port_handle_t AudioOutputDescriptor::getId() const
{
return mId;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index a9fe48d..e78e121 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -34,51 +34,32 @@
{
}
+static String8 dumpPatchEndpoints(
+ int spaces, const char *prefix, int count, const audio_port_config *cfgs)
+{
+ String8 result;
+ for (int i = 0; i < count; ++i) {
+ const audio_port_config &cfg = cfgs[i];
+ result.appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
+ if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
+ std::string device;
+ deviceToString(cfg.ext.device.type, device);
+ result.appendFormat("Device ID %d %s", cfg.id, device.c_str());
+ } else {
+ result.appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+ }
+ result.append("\n");
+ }
+ return result;
+}
+
status_t AudioPatch::dump(int fd, int spaces, int index) const
{
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
-
- snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
- result.append(buffer);
- snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
- result.append(buffer);
- for (size_t i = 0; i < mPatch.num_sources; i++) {
- if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(mPatch.sources[i].ext.device.type, device);
- snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
- mPatch.sources[i].id,
- device.c_str());
- } else {
- snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
- mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
- }
- result.append(buffer);
- }
- snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
- result.append(buffer);
- for (size_t i = 0; i < mPatch.num_sinks; i++) {
- if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(mPatch.sinks[i].ext.device.type, device);
- snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
- mPatch.sinks[i].id,
- device.c_str());
- } else {
- snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
- mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
- }
- result.append(buffer);
- }
-
+ result.appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
+ spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+ result.append(dumpPatchEndpoints(spaces, "src ", mPatch.num_sources, mPatch.sources));
+ result.append(dumpPatchEndpoints(spaces, "sink", mPatch.num_sinks, mPatch.sinks));
write(fd, result.string(), result.size());
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index d85562e..3fe37ab 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -386,22 +386,12 @@
// --- AudioPortConfig class implementation
-AudioPortConfig::AudioPortConfig()
-{
- mSamplingRate = 0;
- mChannelMask = AUDIO_CHANNEL_NONE;
- mFormat = AUDIO_FORMAT_INVALID;
- memset(&mGain, 0, sizeof(struct audio_gain_config));
- mGain.index = -1;
-}
-
status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
struct audio_port_config *backupConfig)
{
- struct audio_port_config localBackupConfig;
+ struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
status_t status = NO_ERROR;
- localBackupConfig.config_mask = config->config_mask;
toAudioPortConfig(&localBackupConfig);
sp<AudioPort> audioport = getAudioPort();
@@ -425,6 +415,9 @@
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
mGain = config->gain;
}
+ if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ mFlags = config->flags;
+ }
exit:
if (status != NO_ERROR) {
@@ -436,33 +429,38 @@
return status;
}
+namespace {
+
+template<typename T>
+void updateField(
+ const T& portConfigField, T audio_port_config::*port_config_field,
+ struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+ unsigned int configMask, T defaultValue)
+{
+ if (dstConfig->config_mask & configMask) {
+ if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+ dstConfig->*port_config_field = srcConfig->*port_config_field;
+ } else {
+ dstConfig->*port_config_field = portConfigField;
+ }
+ } else {
+ dstConfig->*port_config_field = defaultValue;
+ }
+}
+
+} // namespace
+
void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig) const
{
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- dstConfig->sample_rate = mSamplingRate;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
- dstConfig->sample_rate = srcConfig->sample_rate;
- }
- } else {
- dstConfig->sample_rate = 0;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- dstConfig->channel_mask = mChannelMask;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
- dstConfig->channel_mask = srcConfig->channel_mask;
- }
- } else {
- dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
- }
- if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- dstConfig->format = mFormat;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
- dstConfig->format = srcConfig->format;
- }
- } else {
- dstConfig->format = AUDIO_FORMAT_INVALID;
- }
+ updateField(mSamplingRate, &audio_port_config::sample_rate,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+ updateField(mChannelMask, &audio_port_config::channel_mask,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+ (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+ updateField(mFormat, &audio_port_config::format,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+
sp<AudioPort> audioport = getAudioPort();
if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
dstConfig->gain = mGain;
@@ -478,6 +476,9 @@
} else {
dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
}
+
+ updateField(mFlags, &audio_port_config::flags,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
}
} // namespace android
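The updateField() helper above collapses the three near-identical if/else ladders that toAudioPortConfig() used to carry: for each field it prefers srcConfig's value when both config masks request it, otherwise falls back to the stored member, and writes the field's "none" default when the destination mask does not ask for it. For reference, one call expands to the same logic the removed code spelled out by hand, e.g. for the sample rate:
    // Equivalent expansion of:
    //   updateField(mSamplingRate, &audio_port_config::sample_rate,
    //               dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
        dstConfig->sample_rate =
                (srcConfig != nullptr && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE))
                        ? srcConfig->sample_rate : mSamplingRate;
    } else {
        dstConfig->sample_rate = 0u;
    }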
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index fd6fc1c..26af9b4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -28,6 +28,23 @@
namespace android {
+static AudioProfile* createFullDynamicImpl()
+{
+ AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
+ ChannelsVector(), SampleRateVector());
+ dynamicProfile->setDynamicFormat(true);
+ dynamicProfile->setDynamicChannels(true);
+ dynamicProfile->setDynamicRate(true);
+ return dynamicProfile;
+}
+
+// static
+sp<AudioProfile> AudioProfile::createFullDynamic()
+{
+ static sp<AudioProfile> dynamicProfile = createFullDynamicImpl();
+ return dynamicProfile;
+}
+
status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
audio_format_t format) const
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 7cda46b..91dee35 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -86,7 +86,7 @@
}
// Recording configuration callback:
- const AudioSessionInfoProvider* provider = mInfoProvider;
+ const AudioIODescriptorInterface* provider = mInfoProvider;
const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
@@ -114,16 +114,16 @@
return false;
}
-void AudioSession::setInfoProvider(AudioSessionInfoProvider *provider)
+void AudioSession::setInfoProvider(AudioIODescriptorInterface *provider)
{
mInfoProvider = provider;
}
-void AudioSession::onSessionInfoUpdate() const
+void AudioSession::onIODescriptorUpdate() const
{
if (mActiveCount > 0) {
// resend the callback after re-querying the information from the info provider
- const AudioSessionInfoProvider* provider = mInfoProvider;
+ const AudioIODescriptorInterface* provider = mInfoProvider;
const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
@@ -170,7 +170,7 @@
status_t AudioSessionCollection::addSession(audio_session_t session,
const sp<AudioSession>& audioSession,
- AudioSessionInfoProvider *provider)
+ AudioIODescriptorInterface *provider)
{
ssize_t index = indexOfKey(session);
@@ -271,10 +271,10 @@
return source;
}
-void AudioSessionCollection::onSessionInfoUpdate() const
+void AudioSessionCollection::onIODescriptorUpdate() const
{
for (size_t i = 0; i < size(); i++) {
- valueAt(i)->onSessionInfoUpdate();
+ valueAt(i)->onIODescriptorUpdate();
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index 1e105f5..19eac26 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -412,6 +412,7 @@
free(data);
ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+ config.setSource(path);
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 19c2062..d3cc8b9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -145,8 +145,8 @@
}
}
}
- ALOGV("DeviceVector::getDevice() for type %08x address %s found %p",
- type, address.string(), device.get());
+ ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
+ __func__, type, address.string(), device.get());
return device;
}
@@ -160,7 +160,7 @@
return nullptr;
}
-DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
+DeviceVector DeviceVector::getDevicesFromTypeMask(audio_devices_t type) const
{
DeviceVector devices;
bool isOutput = audio_is_output_devices(type);
@@ -171,20 +171,8 @@
if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
devices.add(itemAt(i));
type &= ~curType;
- ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
- itemAt(i)->type(), itemAt(i).get());
- }
- }
- return devices;
-}
-
-DeviceVector DeviceVector::getDevicesFromTypeAddr(
- audio_devices_t type, const String8& address) const
-{
- DeviceVector devices;
- for (const auto& device : *this) {
- if (device->type() == type && device->mAddress == address) {
- devices.add(device);
+ ALOGV("DeviceVector::%s() for type %08x found %p",
+ __func__, itemAt(i)->type(), itemAt(i).get());
}
}
return devices;
@@ -253,7 +241,7 @@
void DeviceDescriptor::toAudioPort(struct audio_port *port) const
{
- ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
+ ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
AudioPort::toAudioPort(port);
port->id = mId;
toAudioPortConfig(&port->active_config);
@@ -305,7 +293,7 @@
{
std::string device;
deviceToString(mDeviceType, device);
- ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId, mDeviceType, device.c_str(),
+ ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType, device.c_str(),
mAddress.string());
AudioPort::log(" ");
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index aef7dbe..dcc0ec8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -278,9 +278,10 @@
sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
const char *device_address,
const char *device_name,
- bool matchAdress) const
+ bool matchAddress) const
{
- String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
+ String8 address = (device_address == nullptr || !matchAddress) ?
+ String8("") : String8(device_address);
// handle legacy remote submix case where the address was not always specified
if (device_distinguishes_on_address(device) && (address.length() == 0)) {
address = String8("0");
@@ -288,15 +289,9 @@
for (const auto& hwModule : *this) {
DeviceVector declaredDevices = hwModule->getDeclaredDevices();
- DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
- if (!deviceList.isEmpty()) {
- return deviceList.itemAt(0);
- }
- if (!matchAdress) {
- deviceList = declaredDevices.getDevicesFromType(device);
- if (!deviceList.isEmpty()) {
- return deviceList.itemAt(0);
- }
+ sp<DeviceDescriptor> deviceDesc = declaredDevices.getDevice(device, address);
+ if (deviceDesc) {
+ return deviceDesc;
}
}
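With the corrected matchAddress flag, the address is blanked up front, so a single DeviceVector::getDevice(type, address) lookup covers both the "exact address" and "any address" cases instead of the old two-step fallback. A rough standalone illustration of that idea (hypothetical types; the real code delegates to DeviceVector::getDevice()):

#include <cstdio>
#include <string>
#include <vector>

struct Device { unsigned type; std::string address; };

// An empty wanted address means "don't care", mirroring how the patched code
// blanks the address when matchAddress is false.
static const Device* findDevice(const std::vector<Device>& devices,
                                unsigned type, const std::string& address) {
    for (const auto& d : devices) {
        if (d.type == type && (address.empty() || d.address == address)) {
            return &d;
        }
    }
    return nullptr;
}

int main() {
    const std::vector<Device> devices = {{0x100, "bus0"}, {0x100, "bus1"}};
    printf("%s\n", findDevice(devices, 0x100, "")->address.c_str());      // any address: "bus0"
    printf("%s\n", findDevice(devices, 0x100, "bus1")->address.c_str());  // exact address match
    return 0;
}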
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index a253113..8008a7c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -242,12 +242,7 @@
AudioProfileTraits::Collection profiles;
deserializeCollection<AudioProfileTraits>(doc, child, profiles, NULL);
if (profiles.isEmpty()) {
- sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
- ChannelsVector(), SampleRateVector());
- dynamicProfile->setDynamicFormat(true);
- dynamicProfile->setDynamicChannels(true);
- dynamicProfile->setDynamicRate(true);
- profiles.add(dynamicProfile);
+ profiles.add(AudioProfile::createFullDynamic());
}
mixPort->setAudioProfiles(profiles);
@@ -328,12 +323,7 @@
AudioProfileTraits::Collection profiles;
deserializeCollection<AudioProfileTraits>(doc, root, profiles, NULL);
if (profiles.isEmpty()) {
- sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
- ChannelsVector(), SampleRateVector());
- dynamicProfile->setDynamicFormat(true);
- dynamicProfile->setDynamicChannels(true);
- dynamicProfile->setDynamicRate(true);
- profiles.add(dynamicProfile);
+ profiles.add(AudioProfile::createFullDynamic());
}
deviceDesc->setAudioProfiles(profiles);
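Both call sites now use the AudioProfile::createFullDynamic() factory instead of repeating the same four-step construction. Based purely on the removed call-site code, the factory presumably does something along these lines (a hedged sketch reconstructed from the lines deleted above, not the actual definition, which lives in AudioProfile and may differ in detail):

static sp<AudioProfile> createFullDynamic()
{
    sp<AudioProfile> dynamicProfile =
            new AudioProfile(gDynamicFormat, ChannelsVector(), SampleRateVector());
    dynamicProfile->setDynamicFormat(true);
    dynamicProfile->setDynamicChannels(true);
    dynamicProfile->setDynamicRate(true);
    return dynamicProfile;
}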
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index d34214b..38ab560 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -129,7 +129,7 @@
if (streamType == route->mStreamType && route->isActiveOrChanged()
&& route->mDeviceDescriptor != 0) {
device = route->mDeviceDescriptor->type();
- if (!availableDevices.getDevicesFromType(device).isEmpty()) {
+ if (!availableDevices.getDevicesFromTypeMask(device).isEmpty()) {
break;
}
}
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index a75f1cb..9381f1f 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -185,6 +185,9 @@
<!-- Hearing aid Audio HAL -->
<xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration.xml"/>
+
</modules>
<!-- End of Modules section -->
diff --git a/services/audiopolicy/config/msd_audio_policy_configuration.xml b/services/audiopolicy/config/msd_audio_policy_configuration.xml
new file mode 100644
index 0000000..a84117e
--- /dev/null
+++ b/services/audiopolicy/config/msd_audio_policy_configuration.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2017-2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Multi Stream Decoder Audio Policy Configuration file -->
+<module name="msd" halVersion="2.0">
+ <attachedDevices>
+ <item>MS12 Input</item>
+ <item>MS12 Output</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="ms12 input" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="ms12 compressed input" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_5POINT1,AUDIO_CHANNEL_OUT_7POINT1"/>
+ </mixPort>
+ <mixPort name="ms12 output" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="MS12 Input" sources="ms12 input,ms12 compressed input"/>
+ <route type="mix" sink="ms12 output" sources="MS12 Output"/>
+ </routes>
+</module>
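For orientation, the new MSD module declares two attached bus devices and two mix routes: the PCM and compressed source mix ports ("ms12 input", "ms12 compressed input") feed the "MS12 Input" bus output device, while the "MS12 Output" bus input device feeds the "ms12 output" sink mix port, which is how decoded audio re-enters the policy graph.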
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 36e0f42..b128a38 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -10,7 +10,6 @@
$(LOCAL_PATH)/include \
frameworks/av/services/audiopolicy/engineconfigurable/include \
frameworks/av/services/audiopolicy/engineconfigurable/interface \
- frameworks/av/services/audiopolicy/utilities/convert \
LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 3e13e50..941119b 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -408,8 +408,7 @@
case STRATEGY_SONIFICATION:
- // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
- // handleIncallSonification().
+ // If incall, just select the STRATEGY_PHONE device
if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
@@ -482,7 +481,7 @@
}
}
availableOutputDevices =
- availableOutputDevices.getDevicesFromType(availableOutputDevicesType);
+ availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
return getDeviceForStrategyInt(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index a546c8f..5cdd63a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -38,6 +38,8 @@
#include <utils/Log.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
+#include <media/PatchBuilder.h>
+#include <private/android_filesystem_config.h>
#include <soundtrigger/SoundTrigger.h>
#include <system/audio.h>
#include <audio_policy_conf.h>
@@ -200,27 +202,25 @@
return BAD_VALUE;
}
- // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
- // output is suspended before any tracks are moved to it
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- // outputs must be closed after checkOutputForAllStrategies() is executed
- if (!outputs.isEmpty()) {
- for (audio_io_handle_t output : outputs) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
- // close unused outputs after device disconnection or direct outputs that have been
- // opened by checkOutputsForDevice() to query dynamic parameters
- if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
- (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
- closeOutput(output);
+ checkForDeviceAndOutputChanges([&]() {
+ // outputs must be closed after checkOutputForAllStrategies() is executed
+ if (!outputs.isEmpty()) {
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+ // close unused outputs after device disconnection or direct outputs that have been
+ // opened by checkOutputsForDevice() to query dynamic parameters
+ if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+ (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+ (desc->mDirectOpenCount == 0))) {
+ closeOutput(output);
+ }
}
+ // check A2DP again after closing A2DP output to reset mA2dpSuspended if needed
+ return true;
}
- // check again after closing A2DP output to reset mA2dpSuspended if needed
- checkA2dpSuspend();
- }
+ return false;
+ });
- updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
@@ -476,20 +476,15 @@
sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
bool isRx, audio_devices_t device, uint32_t delayMs) {
- struct audio_patch patch;
- patch.num_sources = 1;
- patch.num_sinks = 1;
+ PatchBuilder patchBuilder;
sp<DeviceDescriptor> txSourceDeviceDesc;
if (isRx) {
- fillAudioPortConfigForDevice(mAvailableOutputDevices, device, &patch.sinks[0]);
- fillAudioPortConfigForDevice(
- mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX, &patch.sources[0]);
+ patchBuilder.addSink(findDevice(mAvailableOutputDevices, device)).
+ addSource(findDevice(mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX));
} else {
- txSourceDeviceDesc = fillAudioPortConfigForDevice(
- mAvailableInputDevices, device, &patch.sources[0]);
- fillAudioPortConfigForDevice(
- mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX, &patch.sinks[0]);
+ patchBuilder.addSource(txSourceDeviceDesc = findDevice(mAvailableInputDevices, device)).
+ addSink(findDevice(mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX));
}
audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
@@ -500,9 +495,7 @@
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"%s() %#x device output %d is duplicated", __func__, outputDevice, output);
- outputDesc->toAudioPortConfig(&patch.sources[1]);
- patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
- patch.num_sources = 2;
+ patchBuilder.addSource(outputDesc, { .stream = AUDIO_STREAM_PATCH });
}
if (!isRx) {
@@ -524,26 +517,25 @@
}
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- status_t status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
+ status_t status = mpClientInterface->createAudioPatch(
+ patchBuilder.patch(), &afPatchHandle, delayMs);
ALOGW_IF(status != NO_ERROR,
"%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
sp<AudioPatch> audioPatch;
if (status == NO_ERROR) {
- audioPatch = new AudioPatch(&patch, mUidCached);
+ audioPatch = new AudioPatch(patchBuilder.patch(), mUidCached);
audioPatch->mAfPatchHandle = afPatchHandle;
audioPatch->mUid = mUidCached;
}
return audioPatch;
}
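PatchBuilder replaces manual population of struct audio_patch with a fluent interface: each addSource()/addSink() call appends a port config, and patch() returns a pointer to the assembled struct, as used above in createTelephonyPatch(). A generic self-contained sketch of that builder style (simplified stand-in types, not the real PatchBuilder API):

#include <cstdio>
#include <vector>

struct PortConfig { int id; };
struct Patch { std::vector<PortConfig> sources, sinks; };

class Builder {                       // simplified stand-in for PatchBuilder
public:
    Builder& addSource(PortConfig c) { mPatch.sources.push_back(c); return *this; }
    Builder& addSink(PortConfig c)   { mPatch.sinks.push_back(c);   return *this; }
    const Patch* patch() const       { return &mPatch; }
private:
    Patch mPatch;
};

int main() {
    Builder builder;
    builder.addSource({1}).addSink({2});          // chained calls, as in the telephony patch code
    printf("%zu source(s), %zu sink(s)\n",
           builder.patch()->sources.size(), builder.patch()->sinks.size());
    return 0;
}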
-sp<DeviceDescriptor> AudioPolicyManager::fillAudioPortConfigForDevice(
- const DeviceVector& devices, audio_devices_t device, audio_port_config *config) {
- DeviceVector deviceList = devices.getDevicesFromType(device);
+sp<DeviceDescriptor> AudioPolicyManager::findDevice(
+ const DeviceVector& devices, audio_devices_t device) {
+ DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
ALOG_ASSERT(!deviceList.isEmpty(),
"%s() selected device type %#x is not in devices list", __func__, device);
- sp<DeviceDescriptor> deviceDesc = deviceList.itemAt(0);
- deviceDesc->toAudioPortConfig(config);
- return deviceDesc;
+ return deviceList.itemAt(0);
}
void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -557,14 +549,8 @@
return;
}
/// Opens: can these lines be executed after the switch of volume curves???
- // if leaving call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, false, true);
- }
-
// force reevaluating accessibility routing when call stops
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -577,9 +563,7 @@
|| (is_state_in_call(state) && (state != oldState)));
// check for device and output changes triggered by new phone state
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- updateDevicesAndOutputs();
+ checkForDeviceAndOutputChanges();
int delayMs = 0;
if (isStateInCall(state)) {
@@ -643,14 +627,8 @@
}
}
- // if entering in call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, true, true);
- }
-
// force reevaluating accessibility routing when call starts
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -685,9 +663,7 @@
(usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
// check for device and output changes triggered by new force usage
- checkA2dpSuspend();
- checkOutputForAllStrategies();
- updateDevicesAndOutputs();
+ checkForDeviceAndOutputChanges();
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
@@ -819,39 +795,53 @@
stream_type_to_audio_attributes(*stream, &attributes);
}
- // TODO: check for existing client for this port ID
- if (*portId == AUDIO_PORT_HANDLE_NONE) {
- *portId = AudioPort::getNextUniqueId();
- }
-
- sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
- ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
- if (!audio_has_proportional_frames(config->format)) {
- return BAD_VALUE;
- }
- *stream = streamTypefromAttributesInt(&attributes);
- *output = desc->mIoHandle;
- ALOGV("getOutputForAttr() returns output %d", *output);
- return NO_ERROR;
- }
- if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
- ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
- return BAD_VALUE;
- }
-
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
session, *selectedDeviceId);
- *stream = streamTypefromAttributesInt(&attributes);
+ // TODO: check for existing client for this port ID
+ if (*portId == AUDIO_PORT_HANDLE_NONE) {
+ *portId = AudioPort::getNextUniqueId();
+ }
- // Explicit routing?
+ // First check for explicit routing (e.g. setPreferredDevice)
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+ } else {
+ // If no explicit route, is there a matching dynamic policy that applies?
+ sp<SwAudioOutputDescriptor> desc;
+ if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+ ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+ if (!audio_has_proportional_frames(config->format)) {
+ return BAD_VALUE;
+ }
+ *stream = streamTypefromAttributesInt(&attributes);
+ *output = desc->mIoHandle;
+ ALOGV("getOutputForAttr() returns output %d", *output);
+ return NO_ERROR;
+ }
+
+ // Virtual sources must always be dynamically or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
}
+
+ // Virtual sources must always be dynamicaly or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
+
+ *stream = streamTypefromAttributesInt(&attributes);
+
+ // TODO: Should this happen only if an explicit route is active?
+ // the previous code structure meant that this would always happen which
+ // would appear to result in adding a null deviceDesc when not using an
+ // explicit route. Is that the intended and necessary behavior?
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
@@ -871,7 +861,7 @@
return INVALID_OPERATION;
}
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+ DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
*selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
@@ -976,7 +966,7 @@
sp<SwAudioOutputDescriptor> outputDesc =
new SwAudioOutputDescriptor(profile, mpClientInterface);
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+ DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(device);
String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
: String8("");
@@ -1285,11 +1275,6 @@
const uint32_t muteWaitMs =
setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, true, false);
- }
-
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1384,11 +1369,6 @@
// always handle stream stop, check which stream type is stopping
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, false, false);
- }
-
if (outputDesc->mRefCount[stream] > 0) {
// decrement usage count of this stream on the output
outputDesc->changeRefCount(stream, -1);
@@ -1545,7 +1525,7 @@
if (*portId == AUDIO_PORT_HANDLE_NONE) {
*portId = AudioPort::getNextUniqueId();
}
- inputDevices = mAvailableInputDevices.getDevicesFromType(inputDesc->mDevice);
+ inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(inputDesc->mDevice);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
@@ -1612,7 +1592,7 @@
goto error;
}
- inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+ inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
@@ -1780,7 +1760,7 @@
lConfig.format = profileFormat;
if (address == "") {
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+ DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(device);
// the inputs vector must be of size >= 1, but we don't want to crash here
address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
}
@@ -2279,11 +2259,10 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
continue;
}
- if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
- (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+ if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2301,13 +2280,15 @@
applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
stream, curStreamDevice);
}
-
+ // rescale index before applying to curStream as ranges may be different for
+ // stream and curStream
+ int idx = rescaleVolumeIndex(index, stream, (audio_stream_type_t)curStream);
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
status_t volStatus =
- checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
+ checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
(stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
@@ -2627,42 +2608,24 @@
status_t AudioPolicyManager::dump(int fd)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
-
- snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
- result.append(buffer);
-
- snprintf(buffer, SIZE, " Primary Output: %d\n",
+ result.appendFormat("\nAudioPolicyManager Dump: %p\n", this);
+ result.appendFormat(" Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
- result.append(buffer);
std::string stateLiteral;
AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
- snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for communications %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
- result.append(buffer);
- snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
- result.append(buffer);
- snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
- result.append(buffer);
- snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
- result.append(buffer);
-
+ result.appendFormat(" Phone state: %s\n", stateLiteral.c_str());
+ const char* forceUses[AUDIO_POLICY_FORCE_USE_CNT] = {
+ "communications", "media", "record", "dock", "system",
+ "HDMI system audio", "encoded surround output", "vibrate ringing" };
+ for (audio_policy_force_use_t i = AUDIO_POLICY_FORCE_FOR_COMMUNICATION;
+ i < AUDIO_POLICY_FORCE_USE_CNT; i = (audio_policy_force_use_t)((int)i + 1)) {
+ result.appendFormat(" Force use for %s: %d\n",
+ forceUses[i], mEngine->getForceUse(i));
+ }
+ result.appendFormat(" TTS output %savailable\n", mTtsOutputAvailable ? "" : "not ");
+ result.appendFormat(" Master mono: %s\n", mMasterMono ? "on" : "off");
+ result.appendFormat(" Config source: %s\n", getConfig().getSource().c_str());
write(fd, result.string(), result.size());
mAvailableOutputDevices.dump(fd, String8("Available output"));
@@ -2858,8 +2821,7 @@
}
ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch)) {
return BAD_VALUE;
}
// only one source per audio patch supported for now
@@ -3078,28 +3040,8 @@
}
// TODO: check from routing capabilities in config file and other conflicting patches
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&newPatch,
- &afPatchHandle,
- 0);
- ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
- status, afPatchHandle);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&newPatch, uid);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = newPatch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- *handle = patchDesc->mHandle;
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- } else {
+ status_t status = installPatch(__func__, index, handle, &newPatch, 0, uid, &patchDesc);
+ if (status != NO_ERROR) {
ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
status);
return INVALID_OPERATION;
@@ -3232,10 +3174,10 @@
return BAD_VALUE;
}
- struct audio_port_config backupConfig;
+ struct audio_port_config backupConfig = {};
status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig);
if (status == NO_ERROR) {
- struct audio_port_config newConfig;
+ struct audio_port_config newConfig = {};
audioPortConfig->toAudioPortConfig(&newConfig, config);
status = mpClientInterface->setAudioPortConfig(&newConfig, 0);
}
@@ -3383,7 +3325,7 @@
sp<AudioSourceDescriptor> sourceDesc =
new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
- struct audio_patch dummyPatch;
+ struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
sourceDesc->mPatchDesc = patchDesc;
@@ -3411,7 +3353,6 @@
mAvailableOutputDevices.getDevice(sinkDevice, String8(""));
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- struct audio_patch *patch = &sourceDesc->mPatchDesc->mPatch;
if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
@@ -3443,16 +3384,14 @@
// be connected as well as the stream type for volume control
// - the sink is defined by whatever output device is currently selected for the output
// though which this patch is routed.
- patch->num_sinks = 0;
- patch->num_sources = 2;
- srcDeviceDesc->toAudioPortConfig(&patch->sources[0], NULL);
- outputDesc->toAudioPortConfig(&patch->sources[1], NULL);
- patch->sources[1].ext.mix.usecase.stream = stream;
- status = mpClientInterface->createAudioPatch(patch,
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(srcDeviceDesc).addSource(outputDesc, { .stream = stream });
+ status = mpClientInterface->createAudioPatch(patchBuilder.patch(),
&afPatchHandle,
0);
ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
status, afPatchHandle);
+ sourceDesc->mPatchDesc->mPatch = *patchBuilder.patch();
if (status != NO_ERROR) {
ALOGW("%s patch panel could not connect device patch, error %d",
__FUNCTION__, status);
@@ -3615,7 +3554,7 @@
}
}
// Open an output to query dynamic parameters.
- DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
String8 address = hdmiOutputDevices[i]->mAddress;
@@ -3741,7 +3680,7 @@
sp<SwAudioOutputDescriptor> outputDesc;
bool profileUpdated = false;
- DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
@@ -3760,7 +3699,7 @@
name.c_str());
profileUpdated |= (status == NO_ERROR);
}
- DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromType(
+ DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromTypeMask(
AUDIO_DEVICE_IN_HDMI);
for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
@@ -3904,6 +3843,7 @@
"%s/%s", kConfigLocationList[i], fileName);
ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
if (ret == NO_ERROR) {
+ config.setSource(audioPolicyXmlConfigFile);
return ret;
}
}
@@ -3915,7 +3855,7 @@
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
bool /*forTesting*/)
:
- mUidCached(getuid()),
+ mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running.
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
@@ -4026,7 +3966,8 @@
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
- const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
+ const DeviceVector &devicesForType = supportedDevices.getDevicesFromTypeMask(
+ profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
@@ -4080,7 +4021,7 @@
sp<AudioInputDescriptor> inputDesc =
new AudioInputDescriptor(inProfile, mpClientInterface);
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+ DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromTypeMask(profileType);
// the inputs vector must be of size >= 1, but we don't want to crash here
String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
: String8("");
@@ -4700,6 +4641,21 @@
return true;
}
+void AudioPolicyManager::checkForDeviceAndOutputChanges()
+{
+ checkForDeviceAndOutputChanges([](){ return false; });
+}
+
+void AudioPolicyManager::checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked)
+{
+ // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+ // output is suspended before any tracks are moved to it
+ checkA2dpSuspend();
+ checkOutputForAllStrategies();
+ if (onOutputsChecked()) checkA2dpSuspend();
+ updateDevicesAndOutputs();
+}
+
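The new overload threads a caller-supplied step between the outputs check and the device/output cache update: if the std::function<bool()> callback returns true, the A2DP suspend state is re-evaluated. A tiny self-contained sketch of that callback-between-fixed-steps pattern (generic names, not the audio policy types):

#include <cstdio>
#include <functional>

// Generic illustration: run fixed steps, let the caller inject work in the
// middle, and repeat one step only if the injected work asks for it.
static void runChecks(const std::function<bool()>& onChecked = [] { return false; }) {
    printf("checkA2dpSuspend\n");
    printf("checkOutputForAllStrategies\n");
    if (onChecked()) printf("checkA2dpSuspend (again)\n");
    printf("updateDevicesAndOutputs\n");
}

int main() {
    runChecks();                                   // simple callers take the default
    runChecks([] {                                 // setDeviceConnectionState-style caller
        printf("close unused outputs\n");
        return true;                               // request the A2DP re-check
    });
    return 0;
}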
void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
{
audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
@@ -5249,54 +5205,20 @@
} else {
DeviceVector deviceList;
if ((address == NULL) || (strlen(address) == 0)) {
- deviceList = mAvailableOutputDevices.getDevicesFromType(device);
+ deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
} else {
- deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+ sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(
+ device, String8(address));
+ if (deviceDesc) deviceList.add(deviceDesc);
}
if (!deviceList.isEmpty()) {
- struct audio_patch patch;
- outputDesc->toAudioPortConfig(&patch.sources[0]);
- patch.num_sources = 1;
- patch.num_sinks = 0;
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(outputDesc);
for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
- deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
- patch.num_sinks++;
+ patchBuilder.addSink(deviceList.itemAt(i));
}
- ssize_t index;
- if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- index = mAudioPatches.indexOfKey(*patchHandle);
- } else {
- index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
- }
- sp< AudioPatch> patchDesc;
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- patchDesc = mAudioPatches.valueAt(index);
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&patch,
- &afPatchHandle,
- delayMs);
- ALOGV("setOutputDevice() createAudioPatch returned %d patchHandle %d"
- "num_sources %d num_sinks %d",
- status, afPatchHandle, patch.num_sources, patch.num_sinks);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&patch, mUidCached);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = patch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- if (patchHandle) {
- *patchHandle = patchDesc->mHandle;
- }
- outputDesc->setPatchHandle(patchDesc->mHandle);
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- }
+ installPatch(__func__, patchHandle, outputDesc.get(), patchBuilder.patch(), delayMs);
}
// inform all input as well
@@ -5354,53 +5276,21 @@
if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
inputDesc->mDevice = device;
- DeviceVector deviceList = mAvailableInputDevices.getDevicesFromType(device);
+ DeviceVector deviceList = mAvailableInputDevices.getDevicesFromTypeMask(device);
if (!deviceList.isEmpty()) {
- struct audio_patch patch;
- inputDesc->toAudioPortConfig(&patch.sinks[0]);
+ PatchBuilder patchBuilder;
+ patchBuilder.addSink(inputDesc,
// AUDIO_SOURCE_HOTWORD is for internal use only:
// handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
- if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
- !inputDesc->isSoundTrigger()) {
- patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
- }
- patch.num_sinks = 1;
+ [inputDesc](const PatchBuilder::mix_usecase_t& usecase) {
+ auto result = usecase;
+ if (result.source == AUDIO_SOURCE_HOTWORD && !inputDesc->isSoundTrigger()) {
+ result.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+ }
+ return result; }).
//only one input device for now
- deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
- patch.num_sources = 1;
- ssize_t index;
- if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
- index = mAudioPatches.indexOfKey(*patchHandle);
- } else {
- index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
- }
- sp< AudioPatch> patchDesc;
- audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- if (index >= 0) {
- patchDesc = mAudioPatches.valueAt(index);
- afPatchHandle = patchDesc->mAfPatchHandle;
- }
-
- status_t status = mpClientInterface->createAudioPatch(&patch,
- &afPatchHandle,
- 0);
- ALOGV("setInputDevice() createAudioPatch returned %d patchHandle %d",
- status, afPatchHandle);
- if (status == NO_ERROR) {
- if (index < 0) {
- patchDesc = new AudioPatch(&patch, mUidCached);
- addAudioPatch(patchDesc->mHandle, patchDesc);
- } else {
- patchDesc->mPatch = patch;
- }
- patchDesc->mAfPatchHandle = afPatchHandle;
- if (patchHandle) {
- *patchHandle = patchDesc->mHandle;
- }
- inputDesc->setPatchHandle(patchDesc->mHandle);
- nextAudioPortGeneration();
- mpClientInterface->onAudioPatchListUpdate();
- }
+ addSource(deviceList.itemAt(0));
+ status = installPatch(__func__, patchHandle, inputDesc.get(), patchBuilder.patch(), 0);
}
}
return status;
@@ -5534,8 +5424,8 @@
return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
}
- // in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ // in-call: always cap volume by voice volume + some low headroom
+ if ((stream != AUDIO_STREAM_VOICE_CALL) &&
(isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
@@ -5547,9 +5437,9 @@
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
const float maxVoiceVolDb =
- computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5620,6 +5510,21 @@
return volumeDB;
}
+int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
+ audio_stream_type_t srcStream,
+ audio_stream_type_t dstStream)
+{
+ if (srcStream == dstStream) {
+ return srcIndex;
+ }
+ float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
+ float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
+ float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
+ float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+
+ return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
+}
+
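rescaleVolumeIndex() is a plain linear remap of an index from the source stream's [min, max] index range onto the destination stream's range. For instance, assuming a source range of [0, 7] and a destination range of [0, 15] (hypothetical curve limits), index 5 maps to (int)(0 + (5 - 0) * (15 - 0) / (7 - 0)) = (int)10.71, which truncates to 10, matching the cast in the code above.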
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
int index,
const sp<AudioOutputDescriptor>& outputDesc,
@@ -5746,55 +5651,6 @@
}
}
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
- bool starting, bool stateChange)
-{
- if(!hasPrimaryOutput()) {
- return;
- }
-
- // if the stream pertains to sonification strategy and we are in call we must
- // mute the stream if it is low visibility. If it is high visibility, we must play a tone
- // in the device used for phone strategy and play the tone if the selected device does not
- // interfere with the device used for phone strategy
- // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
- // many times as there are active tracks on the output
- const routing_strategy stream_strategy = getStrategy(stream);
- if ((stream_strategy == STRATEGY_SONIFICATION) ||
- ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
- sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
- ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
- stream, starting, outputDesc->mDevice, stateChange);
- if (outputDesc->mRefCount[stream]) {
- int muteCount = 1;
- if (stateChange) {
- muteCount = outputDesc->mRefCount[stream];
- }
- if (audio_is_low_visibility(stream)) {
- ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- } else {
- ALOGV("handleIncallSonification() high visibility");
- if (outputDesc->device() &
- getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
- ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- }
- if (starting) {
- mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
- AUDIO_STREAM_VOICE_CALL);
- } else {
- mpClientInterface->stopTone();
- }
- }
- }
- }
-}
-
audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
{
// flags to stream type mapping
@@ -6170,4 +6026,58 @@
}
}
+status_t AudioPolicyManager::installPatch(const char *caller,
+ audio_patch_handle_t *patchHandle,
+ AudioIODescriptorInterface *ioDescriptor,
+ const struct audio_patch *patch,
+ int delayMs)
+{
+ ssize_t index = mAudioPatches.indexOfKey(
+ patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE ?
+ *patchHandle : ioDescriptor->getPatchHandle());
+ sp<AudioPatch> patchDesc;
+ status_t status = installPatch(
+ caller, index, patchHandle, patch, delayMs, mUidCached, &patchDesc);
+ if (status == NO_ERROR) {
+ ioDescriptor->setPatchHandle(patchDesc->mHandle);
+ }
+ return status;
+}
+
+status_t AudioPolicyManager::installPatch(const char *caller,
+ ssize_t index,
+ audio_patch_handle_t *patchHandle,
+ const struct audio_patch *patch,
+ int delayMs,
+ uid_t uid,
+ sp<AudioPatch> *patchDescPtr)
+{
+ sp<AudioPatch> patchDesc;
+ audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ if (index >= 0) {
+ patchDesc = mAudioPatches.valueAt(index);
+ afPatchHandle = patchDesc->mAfPatchHandle;
+ }
+
+ status_t status = mpClientInterface->createAudioPatch(patch, &afPatchHandle, delayMs);
+ ALOGV("%s() AF::createAudioPatch returned %d patchHandle %d num_sources %d num_sinks %d",
+ caller, status, afPatchHandle, patch->num_sources, patch->num_sinks);
+ if (status == NO_ERROR) {
+ if (index < 0) {
+ patchDesc = new AudioPatch(patch, uid);
+ addAudioPatch(patchDesc->mHandle, patchDesc);
+ } else {
+ patchDesc->mPatch = *patch;
+ }
+ patchDesc->mAfPatchHandle = afPatchHandle;
+ if (patchHandle) {
+ *patchHandle = patchDesc->mHandle;
+ }
+ nextAudioPortGeneration();
+ mpClientInterface->onAudioPatchListUpdate();
+ }
+ if (patchDescPtr) *patchDescPtr = patchDesc;
+ return status;
+}
+
} // namespace android
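The two installPatch() overloads split the work: the shorter one only resolves which existing patch entry to update (from the explicit handle, or from the I/O descriptor's current patch handle) and then delegates; the longer one calls AudioFlinger's createAudioPatch(), records or updates the AudioPatch in mAudioPatches, bumps the port generation, and notifies listeners. Callers that already know the index, such as createAudioPatch() earlier in this file, call the longer overload directly:

    status_t status = installPatch(__func__, index, handle, &newPatch, 0, uid, &patchDesc);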
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 48e0472..70ca39f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -17,6 +17,7 @@
#pragma once
#include <atomic>
+#include <functional>
#include <memory>
#include <unordered_set>
@@ -353,6 +354,10 @@
int index,
audio_devices_t device);
+ // rescale volume index from srcStream within range of dstStream
+ int rescaleVolumeIndex(int srcIndex,
+ audio_stream_type_t srcStream,
+ audio_stream_type_t dstStream);
// check that volume change is permitted, compute and send new volume to audio hardware
virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
const sp<AudioOutputDescriptor>& outputDesc,
@@ -377,10 +382,6 @@
int delayMs = 0,
audio_devices_t device = (audio_devices_t)0);
- // handle special cases for sonification strategy while in call: mute streams or replace by
- // a special tone in the device used for communication
- void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
audio_mode_t getPhoneState();
// true if device is in a telephony or VoIP call
@@ -410,6 +411,13 @@
// close an input.
void closeInput(audio_io_handle_t input);
+ // runs all the checks required for accommodating changes in devices and outputs
+ // if 'onOutputsChecked' callback is provided, it is executed after the outputs
+ // check via 'checkOutputForAllStrategies'. If the callback returns 'true',
+ // A2DP suspend status is rechecked.
+ void checkForDeviceAndOutputChanges();
+ void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked);
+
// checks and if necessary changes outputs used for all strategies.
// must be called every time a condition that affects the output choice for a given strategy
// changes: connected device, phone state, force use...
@@ -506,8 +514,8 @@
uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
- sp<DeviceDescriptor> fillAudioPortConfigForDevice(
- const DeviceVector& devices, audio_devices_t device, audio_port_config *config);
+ sp<DeviceDescriptor> findDevice(
+ const DeviceVector& devices, audio_devices_t device);
// if argument "device" is different from AUDIO_DEVICE_NONE, startSource() will force
// the re-evaluation of the output device.
@@ -544,7 +552,7 @@
static bool streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2);
- uid_t mUidCached;
+ const uid_t mUidCached; // AID_AUDIOSERVER
AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
// list of descriptors for outputs currently opened
@@ -684,6 +692,18 @@
param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
mpClientInterface->setParameters(output, param.toString());
}
+ status_t installPatch(const char *caller,
+ audio_patch_handle_t *patchHandle,
+ AudioIODescriptorInterface *ioDescriptor,
+ const struct audio_patch *patch,
+ int delayMs);
+ status_t installPatch(const char *caller,
+ ssize_t index,
+ audio_patch_handle_t *patchHandle,
+ const struct audio_patch *patch,
+ int delayMs,
+ uid_t uid,
+ sp<AudioPatch> *patchDescPtr);
bool soundTriggerSupportsConcurrentCapture();
bool mSoundTriggerSupportsConcurrentCapture;
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
return result;
}
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
- return mAudioPolicyService->stopTone();
-}
-
status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
{
return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index c7dfe0f..fdae23b 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -24,6 +24,7 @@
#include <cutils/misc.h>
#include <media/AudioEffect.h>
#include <media/EffectsConfig.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
#include <system/audio_effects/audio_effects_conf.h>
#include <utils/Vector.h>
@@ -31,7 +32,6 @@
#include <cutils/config_utils.h>
#include <binder/IPCThreadState.h>
#include "AudioPolicyEffects.h"
-#include "ServiceUtilities.h"
namespace android {
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 7337f04..859072b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -17,12 +17,12 @@
#define LOG_TAG "AudioPolicyIntefaceImpl"
//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-#include <media/MediaAnalyticsItem.h>
-
#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
#include "TypeConverter.h"
+#include <media/AudioPolicyHelper.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Log.h>
namespace android {
@@ -183,7 +183,7 @@
Mutex::Autolock _l(mLock);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || uid == (uid_t)-1) {
+ if (!isAudioServerOrMediaServerUid(callingUid) || uid == (uid_t)-1) {
ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
@@ -208,93 +208,128 @@
config,
&flags, selectedDeviceId, portId);
}
+
+ if (result == NO_ERROR) {
+ sp <AudioPlaybackClient> client =
+ new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+ mAudioPlaybackClients.add(*portId, client);
+ }
return result;
}
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::startOutput(audio_port_handle_t portId)
{
- if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- return BAD_VALUE;
- }
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
ALOGV("startOutput()");
+ sp<AudioPlaybackClient> client;
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return INVALID_OPERATION;
+ }
+ client = mAudioPlaybackClients.valueAt(index);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
// create audio processors according to stream
- status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
+ status_t status = audioPolicyEffects->addOutputSessionEffects(
+ client->io, client->stream, client->session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on session %d", session);
+ ALOGW("Failed to add effects on session %d", client->session);
}
}
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return mAudioPolicyManager->startOutput(output, stream, session);
+ status_t status = mAudioPolicyManager->startOutput(
+ client->io, client->stream, client->session);
+ if (status == NO_ERROR) {
+ client->active = true;
+ }
+ return status;
}
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::stopOutput(audio_port_handle_t portId)
{
- if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- return BAD_VALUE;
+ {
+ Mutex::Autolock _l(mLock);
+
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return INVALID_OPERATION;
+ }
}
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
ALOGV("stopOutput()");
- mOutputCommandThread->stopOutputCommand(output, stream, session);
+ mOutputCommandThread->stopOutputCommand(portId);
return NO_ERROR;
}
-status_t AudioPolicyService::doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+status_t AudioPolicyService::doStopOutput(audio_port_handle_t portId)
{
- ALOGV("doStopOutput from tid %d", gettid());
+ ALOGV("doStopOutput");
+ sp<AudioPlaybackClient> client;
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
+
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return INVALID_OPERATION;
+ }
+ client = mAudioPlaybackClients.valueAt(index);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
// release audio processors from the stream
- status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
+ status_t status = audioPolicyEffects->releaseOutputSessionEffects(
+ client->io, client->stream, client->session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to release effects on session %d", session);
+ ALOGW("Failed to release effects on session %d", client->session);
}
}
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return mAudioPolicyManager->stopOutput(output, stream, session);
+ status_t status = mAudioPolicyManager->stopOutput(
+ client->io, client->stream, client->session);
+ if (status == NO_ERROR) {
+ client->active = false;
+ }
+ return status;
}
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::releaseOutput(audio_port_handle_t portId)
{
if (mAudioPolicyManager == NULL) {
return;
}
ALOGV("releaseOutput()");
- mOutputCommandThread->releaseOutputCommand(output, stream, session);
+ mOutputCommandThread->releaseOutputCommand(portId);
}
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::doReleaseOutput(audio_port_handle_t portId)
{
ALOGV("doReleaseOutput from tid %d", gettid());
Mutex::Autolock _l(mLock);
+ const ssize_t index = mAudioPlaybackClients.indexOfKey(portId);
+ if (index < 0) {
+ ALOGE("%s AudioTrack client not found for portId %d", __FUNCTION__, portId);
+ return;
+ }
+ sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
+ mAudioRecordClients.removeItem(portId);
+
// called from internal thread: no need to clear caller identity
- mAudioPolicyManager->releaseOutput(output, stream, session);
+ mAudioPolicyManager->releaseOutput(
+ client->io, client->stream, client->session);
}
status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
@@ -320,7 +355,7 @@
bool updatePid = (pid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid)) {
+ if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
@@ -403,12 +438,8 @@
return status;
}
- sp<AudioRecordClient> client =
- new AudioRecordClient(*attr, *input, uid, pid, opPackageName, session);
- client->active = false;
- client->isConcurrent = false;
- client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
- client->deviceId = *selectedDeviceId;
+ sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+ *selectedDeviceId, opPackageName);
mAudioRecordClients.add(*portId, client);
}
@@ -497,7 +528,7 @@
{
AutoCallerClear acc;
status = mAudioPolicyManager->startInput(
- client->input, client->session, *silenced, &concurrency);
+ client->io, client->session, *silenced, &concurrency);
}
@@ -528,10 +559,13 @@
item->setCString(kAudioPolicyRqstSrc,
audioSourceString(client->attributes.source).c_str());
- item->setCString(kAudioPolicyRqstPkg,
- std::string(String8(client->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
-
+ if (client->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyRqstPkg,
+ std::string(String8(client->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyRqstPkg, std::to_string(client->uid).c_str());
+ }
item->setCString(
kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
@@ -550,9 +584,14 @@
// keeps the last of the clients marked active
item->setCString(kAudioPolicyActiveSrc,
audioSourceString(other->attributes.source).c_str());
- item->setCString(kAudioPolicyActivePkg,
- std::string(String8(other->opPackageName).string()).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
+ if (other->opPackageName.size() != 0) {
+ item->setCString(kAudioPolicyActivePkg,
+ std::string(String8(other->opPackageName).string()).c_str());
+ } else {
+ item->setCString(kAudioPolicyRqstPkg,
+ std::to_string(other->uid).c_str());
+ }
item->setCString(kAudioPolicyActiveDevice,
getDeviceTypeStrForPortId(other->deviceId).c_str());
}
@@ -602,7 +641,7 @@
// finish the recording app op
finishRecording(client->opPackageName, client->uid);
AutoCallerClear acc;
- return mAudioPolicyManager->stopInput(client->input, client->session);
+ return mAudioPolicyManager->stopInput(client->io, client->session);
}
void AudioPolicyService::releaseInput(audio_port_handle_t portId)
@@ -627,15 +666,15 @@
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(client->input, client->session);
+ status_t status = audioPolicyEffects->releaseInputEffects(client->io, client->session);
if(status != NO_ERROR) {
- ALOGW("Failed to release effects on input %d", client->input);
+ ALOGW("Failed to release effects on input %d", client->io);
}
}
{
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- mAudioPolicyManager->releaseInput(client->input, client->session);
+ mAudioPolicyManager->releaseInput(client->io, client->session);
}
}
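After this change, startOutput/stopOutput/releaseOutput take only an audio_port_handle_t; the (io, stream, session) triple is recovered from the AudioPlaybackClient registered in getOutputForAttr(). A minimal sketch of that "register on create, look up by port ID later" bookkeeping, using standard containers instead of the real KeyedVector/AudioPlaybackClient types:

#include <cstdio>
#include <map>
#include <memory>

struct PlaybackClient {            // stand-in for AudioPlaybackClient
    int io;                        // output handle
    int stream;                    // stream type
    int session;                   // audio session
    bool active = false;
};

static std::map<int, std::shared_ptr<PlaybackClient>> gClients;  // keyed by portId

static int startOutput(int portId) {
    auto it = gClients.find(portId);
    if (it == gClients.end()) {
        printf("client not found for portId %d\n", portId);
        return -1;                 // INVALID_OPERATION in the real code
    }
    it->second->active = true;     // the real code also starts the output and adds effects
    return 0;
}

int main() {
    auto client = std::make_shared<PlaybackClient>();
    client->io = 1; client->stream = 3; client->session = 7;
    gClients[42] = client;         // registered when the output was obtained
    return startOutput(42);        // later calls only need the port ID
}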
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index f3cddc3..8bca221 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -26,7 +26,6 @@
#include <sys/time.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
-#include <cutils/multiuser.h>
#include <cutils/properties.h>
#include <binder/IPCThreadState.h>
#include <binder/ActivityManager.h>
@@ -35,16 +34,14 @@
#include <utils/String16.h>
#include <utils/threads.h>
#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
#include <hardware_legacy/power.h>
#include <media/AudioEffect.h>
#include <media/AudioParameter.h>
+#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <private/android_filesystem_config.h>
-
namespace android {
static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
@@ -70,8 +67,6 @@
{
Mutex::Autolock _l(mLock);
- // start tone playback thread
- mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
@@ -93,7 +88,6 @@
AudioPolicyService::~AudioPolicyService()
{
- mTonePlaybackThread->exit();
mAudioCommandThread->exit();
mOutputCommandThread->exit();
@@ -118,13 +112,17 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
- uid);
- ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+ uid,
+ pid);
+ ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
- mNotificationClients.add(uid, notificationClient);
+ mNotificationClients.add(token, notificationClient);
sp<IBinder> binder = IInterface::asBinder(client);
binder->linkToDeath(notificationClient);
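The registration hunks above switch the notification-client key from a bare uid to a packed (uid, pid) pair. A minimal standalone sketch of that packing follows; the helper names are illustrative only and are not part of the patch.

    // Standalone sketch of the 64-bit client key used above: uid in the upper
    // 32 bits, pid in the lower 32 bits. Helper names are hypothetical.
    #include <cstdint>
    #include <cstdio>

    static int64_t makeClientKey(uint32_t uid, int32_t pid) {
        // Cast pid to uint32_t so a (theoretical) negative value cannot
        // sign-extend into the uid half; the patch relies on pids being positive.
        return ((int64_t)uid << 32) | (uint32_t)pid;
    }

    static uint32_t keyUid(int64_t key) { return (uint32_t)((uint64_t)key >> 32); }
    static int32_t  keyPid(int64_t key) { return (int32_t)(key & 0xffffffff); }

    int main() {
        const int64_t key = makeClientKey(10071 /* uid */, 4242 /* pid */);
        std::printf("key=%lld uid=%u pid=%d\n",
                    (long long)key, keyUid(key), keyPid(key));
        return 0;
    }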
@@ -136,22 +134,33 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
return;
}
- mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+ mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
}
// removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
{
{
Mutex::Autolock _l(mNotificationClientsLock);
- mNotificationClients.removeItem(uid);
+ int64_t token = ((int64_t)uid<<32) | pid;
+ mNotificationClients.removeItem(token);
}
{
Mutex::Autolock _l(mLock);
- if (mAudioPolicyManager) {
+ bool hasSameUid = false;
+ for (size_t i = 0; i < mNotificationClients.size(); i++) {
+ if (mNotificationClients.valueAt(i)->uid() == uid) {
+ hasSameUid = true;
+ break;
+ }
+ }
+ if (mAudioPolicyManager && !hasSameUid) {
// called from binder death notification: no need to clear caller identity
mAudioPolicyManager->releaseResourcesForUid(uid);
}
@@ -239,8 +248,9 @@
AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid)
- : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+ uid_t uid,
+ pid_t pid)
+ : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
mAudioPortCallbacksEnabled(false)
{
}
@@ -254,7 +264,7 @@
sp<NotificationClient> keep(this);
sp<AudioPolicyService> service = mService.promote();
if (service != 0) {
- service->removeNotificationClient(mUid);
+ service->removeNotificationClient(mUid, mPid);
}
}
@@ -275,7 +285,7 @@
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
const String8& regId, int32_t state)
{
- if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
}
}
@@ -285,7 +295,7 @@
const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
audio_patch_handle_t patchHandle)
{
- if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
clientConfig, deviceConfig, patchHandle);
}
@@ -325,8 +335,6 @@
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
- snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
- result.append(buffer);
write(fd, result.string(), result.size());
return NO_ERROR;
@@ -362,9 +370,6 @@
if (mAudioCommandThread != 0) {
mAudioCommandThread->dump(fd);
}
- if (mTonePlaybackThread != 0) {
- mTonePlaybackThread->dump(fd);
- }
if (mAudioPolicyManager) {
mAudioPolicyManager->dump(fd);
@@ -577,10 +582,6 @@
updateUidCache(uid, false, true);
}
-bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
- return multiuser_get_app_id(uid) < AID_APP_START;
-}
-
void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
sp<AudioPolicyService> service = mService.promote();
if (service != nullptr) {
@@ -639,7 +640,6 @@
const wp<AudioPolicyService>& service)
: Thread(false), mName(name), mService(service)
{
- mpToneGenerator = NULL;
}
@@ -649,7 +649,6 @@
release_wake_lock(mName.string());
}
mAudioCommands.clear();
- delete mpToneGenerator;
}
void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -674,26 +673,6 @@
mLastCommand = command;
switch (command->mCommand) {
- case START_TONE: {
- mLock.unlock();
- ToneData *data = (ToneData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing start tone %d on stream %d",
- data->mType, data->mStream);
- delete mpToneGenerator;
- mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
- mpToneGenerator->startTone(data->mType);
- mLock.lock();
- }break;
- case STOP_TONE: {
- mLock.unlock();
- ALOGV("AudioCommandThread() processing stop tone");
- if (mpToneGenerator != NULL) {
- mpToneGenerator->stopTone();
- delete mpToneGenerator;
- mpToneGenerator = NULL;
- }
- mLock.lock();
- }break;
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam.get();
ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -716,26 +695,26 @@
}break;
case STOP_OUTPUT: {
StopOutputData *data = (StopOutputData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing stop output %d",
- data->mIO);
+ ALOGV("AudioCommandThread() processing stop output portId %d",
+ data->mPortId);
svc = mService.promote();
if (svc == 0) {
break;
}
mLock.unlock();
- svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+ svc->doStopOutput(data->mPortId);
mLock.lock();
}break;
case RELEASE_OUTPUT: {
ReleaseOutputData *data = (ReleaseOutputData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing release output %d",
- data->mIO);
+ ALOGV("AudioCommandThread() processing release output portId %d",
+ data->mPortId);
svc = mService.promote();
if (svc == 0) {
break;
}
mLock.unlock();
- svc->doReleaseOutput(data->mIO, data->mStream, data->mSession);
+ svc->doReleaseOutput(data->mPortId);
mLock.lock();
}break;
case CREATE_AUDIO_PATCH: {
@@ -900,27 +879,6 @@
return NO_ERROR;
}
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream)
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = START_TONE;
- sp<ToneData> data = new ToneData();
- data->mType = type;
- data->mStream = stream;
- command->mParam = data;
- ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
- sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = STOP_TONE;
- ALOGV("AudioCommandThread() adding tone stop");
- sendCommand(command);
-}
-
status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
float volume,
audio_io_handle_t output,
@@ -967,33 +925,25 @@
return sendCommand(command, delayMs);
}
-void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_port_handle_t portId)
{
sp<AudioCommand> command = new AudioCommand();
command->mCommand = STOP_OUTPUT;
sp<StopOutputData> data = new StopOutputData();
- data->mIO = output;
- data->mStream = stream;
- data->mSession = session;
+ data->mPortId = portId;
command->mParam = data;
- ALOGV("AudioCommandThread() adding stop output %d", output);
+ ALOGV("AudioCommandThread() adding stop output portId %d", portId);
sendCommand(command);
}
-void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_port_handle_t portId)
{
sp<AudioCommand> command = new AudioCommand();
command->mCommand = RELEASE_OUTPUT;
sp<ReleaseOutputData> data = new ReleaseOutputData();
- data->mIO = output;
- data->mStream = stream;
- data->mSession = session;
+ data->mPortId = portId;
command->mParam = data;
- ALOGV("AudioCommandThread() adding release output %d", output);
+ ALOGV("AudioCommandThread() adding release output portId %d", portId);
sendCommand(command);
}
@@ -1212,6 +1162,7 @@
patch = ((CreateAudioPatchData *)command->mParam.get())->mPatch;
} else {
handle = ((ReleaseAudioPatchData *)command->mParam.get())->mHandle;
+ memset(&patch, 0, sizeof(patch));
}
audio_patch_handle_t handle2;
struct audio_patch patch2;
@@ -1256,8 +1207,6 @@
} break;
- case START_TONE:
- case STOP_TONE:
default:
break;
}
@@ -1330,27 +1279,6 @@
output, delayMs);
}
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
- ALOGE("startTone: illegal tone requested (%d)", tone);
- }
- if (stream != AUDIO_STREAM_VOICE_CALL) {
- ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
- tone);
- }
- mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
- AUDIO_STREAM_VOICE_CALL);
- return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
- mTonePlaybackThread->stopToneCommand();
- return 0;
-}
-
int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
{
return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1406,9 +1334,6 @@
int aps_set_stream_volume(void *service, audio_stream_type_t stream,
float volume, audio_io_handle_t output,
int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
- audio_stream_type_t stream);
-int aps_stop_tone(void *service);
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 407d7a5..d41069e 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -38,8 +38,6 @@
namespace android {
-using namespace std;
-
// ----------------------------------------------------------------------------
class AudioPolicyService :
@@ -83,15 +81,9 @@
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ virtual status_t startOutput(audio_port_handle_t portId);
+ virtual status_t stopOutput(audio_port_handle_t portId);
+ virtual void releaseOutput(audio_port_handle_t portId);
virtual status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
@@ -159,8 +151,6 @@
float volume,
audio_io_handle_t output,
int delayMs = 0);
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
virtual bool isOffloadSupported(const audio_offload_info_t &config);
@@ -209,12 +199,8 @@
bool reported);
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
- status_t doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- void doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ status_t doStopOutput(audio_port_handle_t portId);
+ void doReleaseOutput(audio_port_handle_t portId);
status_t clientCreateAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
@@ -224,7 +210,7 @@
virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
int delayMs);
- void removeNotificationClient(uid_t uid);
+ void removeNotificationClient(uid_t uid, pid_t pid);
void onAudioPortListUpdate();
void doOnAudioPortListUpdate();
void onAudioPatchListUpdate();
@@ -293,7 +279,6 @@
void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
private:
- bool isServiceUid(uid_t uid) const;
void notifyService(uid_t uid, bool active);
void updateOverrideUid(uid_t uid, bool active, bool insert);
void updateUidCache(uid_t uid, bool active, bool insert);
@@ -307,10 +292,7 @@
std::unordered_map<uid_t, bool> mCachedUids;
};
- // Thread used for tone playback and to send audio config commands to audio flinger
- // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
- // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
- // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+ // Thread used to send audio config commands to audio flinger
// For audio config commands, it is necessary because audio flinger requires that the calling
// process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {
@@ -319,8 +301,6 @@
// commands for the AudioCommand thread
enum {
- START_TONE,
- STOP_TONE,
SET_VOLUME,
SET_PARAMETERS,
SET_VOICE_VOLUME,
@@ -345,20 +325,13 @@
virtual bool threadLoop();
void exit();
- void startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream);
- void stopToneCommand();
status_t volumeCommand(audio_stream_type_t stream, float volume,
audio_io_handle_t output, int delayMs = 0);
status_t parametersCommand(audio_io_handle_t ioHandle,
const char *keyValuePairs, int delayMs = 0);
status_t voiceVolumeCommand(float volume, int delayMs = 0);
- void stopOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- void releaseOutputCommand(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
+ void stopOutputCommand(audio_port_handle_t portId);
+ void releaseOutputCommand(audio_port_handle_t portId);
status_t sendCommand(sp<AudioCommand>& command, int delayMs = 0);
void insertCommand_l(sp<AudioCommand>& command, int delayMs = 0);
status_t createAudioPatchCommand(const struct audio_patch *patch,
@@ -390,7 +363,7 @@
void dump(char* buffer, size_t size);
- int mCommand; // START_TONE, STOP_TONE ...
+ int mCommand; // SET_VOLUME, SET_PARAMETERS...
nsecs_t mTime; // time stamp
Mutex mLock; // mutex associated to mCond
Condition mCond; // condition for status return
@@ -406,12 +379,6 @@
AudioCommandData() {}
};
- class ToneData : public AudioCommandData {
- public:
- ToneGenerator::tone_type mType; // tone type (START_TONE only)
- audio_stream_type_t mStream; // stream type (START_TONE only)
- };
-
class VolumeData : public AudioCommandData {
public:
audio_stream_type_t mStream;
@@ -432,16 +399,12 @@
class StopOutputData : public AudioCommandData {
public:
- audio_io_handle_t mIO;
- audio_stream_type_t mStream;
- audio_session_t mSession;
+ audio_port_handle_t mPortId;
};
class ReleaseOutputData : public AudioCommandData {
public:
- audio_io_handle_t mIO;
- audio_stream_type_t mStream;
- audio_session_t mSession;
+ audio_port_handle_t mPortId;
};
class CreateAudioPatchData : public AudioCommandData {
@@ -478,7 +441,6 @@
Mutex mLock;
Condition mWaitWorkCV;
Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
- ToneGenerator *mpToneGenerator; // the tone generator
sp<AudioCommand> mLastCommand; // last processed command (used by dump)
String8 mName; // string used by wake lock for delayed commands
wp<AudioPolicyService> mService;
@@ -553,11 +515,6 @@
// function enabling the audio policy manager to retrieve proprietary information directly from the audio hardware interface.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
-
// set down link audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
@@ -597,7 +554,7 @@
public:
NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid);
+ uid_t uid, pid_t pid);
virtual ~NotificationClient();
void onAudioPortListUpdate();
@@ -610,6 +567,10 @@
audio_patch_handle_t patchHandle);
void setAudioPortCallbacksEnabled(bool enabled);
+ uid_t uid() {
+ return mUid;
+ }
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -619,34 +580,61 @@
const wp<AudioPolicyService> mService;
const uid_t mUid;
+ const pid_t mPid;
const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
bool mAudioPortCallbacksEnabled;
};
+ class AudioClient : public virtual RefBase {
+ public:
+ AudioClient(const audio_attributes_t attributes,
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, const audio_port_handle_t deviceId) :
+ attributes(attributes), io(io), uid(uid), pid(pid),
+ session(session), deviceId(deviceId), active(false) {}
+ ~AudioClient() override = default;
+
+ const audio_attributes_t attributes; // source, flags ...
+ const audio_io_handle_t io; // audio HAL stream IO handle
+ const uid_t uid; // client UID
+ const pid_t pid; // client PID
+ const audio_session_t session; // audio session ID
+ const audio_port_handle_t deviceId; // selected input device port ID
+ bool active; // Playback/Capture is active or inactive
+ };
+
// --- AudioRecordClient ---
// Information about each registered AudioRecord client
// (between calls to getInputForAttr() and releaseInput())
- class AudioRecordClient : public RefBase {
+ class AudioRecordClient : public AudioClient {
public:
AudioRecordClient(const audio_attributes_t attributes,
- const audio_io_handle_t input, uid_t uid, pid_t pid,
- const String16& opPackageName, const audio_session_t session) :
- attributes(attributes),
- input(input), uid(uid), pid(pid),
- opPackageName(opPackageName), session(session),
- active(false), isConcurrent(false), isVirtualDevice(false) {}
- virtual ~AudioRecordClient() {}
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, const audio_port_handle_t deviceId,
+ const String16& opPackageName) :
+ AudioClient(attributes, io, uid, pid, session, deviceId),
+ opPackageName(opPackageName), isConcurrent(false), isVirtualDevice(false) {}
+ ~AudioRecordClient() override = default;
- const audio_attributes_t attributes; // source, flags ...
- const audio_io_handle_t input; // audio HAL input IO handle
- const uid_t uid; // client UID
- const pid_t pid; // client PID
const String16 opPackageName; // client package name
- const audio_session_t session; // audio session ID
- bool active; // Capture is active or inactive
bool isConcurrent; // is allowed to concurrent capture
bool isVirtualDevice; // uses virtual device: updated by APM::getInputForAttr()
- audio_port_handle_t deviceId; // selected input device port ID
+ };
+
+ // --- AudioPlaybackClient ---
+ // Information about each registered AudioTrack client
+ // (between calls to getOutputForAttr() and releaseOutput())
+ class AudioPlaybackClient : public AudioClient {
+ public:
+ AudioPlaybackClient(const audio_attributes_t attributes,
+ const audio_io_handle_t io, uid_t uid, pid_t pid,
+ const audio_session_t session, audio_port_handle_t deviceId,
+ audio_stream_type_t stream) :
+ AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+ ~AudioPlaybackClient() override = default;
+
+ const audio_stream_type_t stream;
};
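The header hunk above folds the state shared by recording and playback clients into a common AudioClient base, with each subclass adding only what it needs. A simplified standalone analog of that shape (plain C++ stand-ins, not the AOSP RefBase/String16 types):

    #include <cstdint>
    #include <string>
    #include <utility>

    // Stand-in for the RefBase-derived AudioClient: only the shared state.
    struct AudioClient {
        const int io;            // HAL stream I/O handle
        const uint32_t uid;      // client UID
        const int32_t pid;       // client PID
        const int session;       // audio session ID
        const int deviceId;      // selected device port ID
        bool active = false;     // playback/capture currently active
        AudioClient(int io, uint32_t uid, int32_t pid, int session, int deviceId)
            : io(io), uid(uid), pid(pid), session(session), deviceId(deviceId) {}
        virtual ~AudioClient() = default;
    };

    // Recording adds only what recording needs: the app-op package and flags.
    struct AudioRecordClient : AudioClient {
        const std::string opPackageName;
        bool isConcurrent = false;
        bool isVirtualDevice = false;
        AudioRecordClient(int io, uint32_t uid, int32_t pid, int session,
                          int deviceId, std::string pkg)
            : AudioClient(io, uid, pid, session, deviceId),
              opPackageName(std::move(pkg)) {}
    };

    // Playback adds only the stream type.
    struct AudioPlaybackClient : AudioClient {
        const int stream;
        AudioPlaybackClient(int io, uint32_t uid, int32_t pid, int session,
                            int deviceId, int stream)
            : AudioClient(io, uid, pid, session, deviceId), stream(stream) {}
    };

    int main() {
        AudioRecordClient rec(1, 10071, 4242, 7, 3, "com.example.app");
        AudioPlaybackClient play(2, 10071, 4242, 8, 5, /*stream*/ 3);
        return rec.active || play.active;  // both start inactive
    }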
// A class automatically clearing and restoring binder caller identity inside
@@ -676,14 +664,13 @@
// mLock protects AudioPolicyManager methods that can call into audio flinger
// and possibly back in to audio policy service and acquire mEffectsLock.
sp<AudioCommandThread> mAudioCommandThread; // audio commands thread
- sp<AudioCommandThread> mTonePlaybackThread; // tone playback thread
sp<AudioCommandThread> mOutputCommandThread; // process stop and release output
struct audio_policy_device *mpAudioPolicyDev;
struct audio_policy *mpAudioPolicy;
AudioPolicyInterface *mAudioPolicyManager;
AudioPolicyClient *mAudioPolicyClient;
- DefaultKeyedVector< uid_t, sp<NotificationClient> > mNotificationClients;
+ DefaultKeyedVector< int64_t, sp<NotificationClient> > mNotificationClients;
Mutex mNotificationClientsLock; // protects mNotificationClients
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
@@ -691,6 +678,7 @@
sp<UidPolicy> mUidPolicy;
DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> > mAudioRecordClients;
+ DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> > mAudioPlaybackClients;
};
} // namespace android
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index a43daea..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -6,7 +6,6 @@
frameworks/av/services/audiopolicy \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/utilities
LOCAL_SHARED_LIBRARIES := \
libaudiopolicymanagerdefault \
@@ -30,3 +29,26 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ liblog \
+ libmedia_helper \
+ libutils
+
+LOCAL_SRC_FILES := \
+ systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
int /*delayMs*/) override { }
String8 getParameters(audio_io_handle_t /*ioHandle*/,
const String8& /*keys*/) override { return String8(); }
- status_t startTone(audio_policy_tone_t /*tone*/,
- audio_stream_type_t /*stream*/) override { return NO_INIT; }
- status_t stopTone() override { return NO_INIT; }
status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
status_t moveEffects(audio_session_t /*session*/,
audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a9593b8..56af152 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -16,9 +16,15 @@
#include <memory>
#include <set>
+#include <sys/wait.h>
+#include <unistd.h>
#include <gtest/gtest.h>
+#define LOG_TAG "APM_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+
#include "AudioPolicyTestClient.h"
#include "AudioPolicyTestManager.h"
@@ -132,6 +138,36 @@
// SetUp must finish with no assertions.
}
+TEST_F(AudioPolicyManagerTest, Dump) {
+ int pipefd[2];
+ ASSERT_NE(-1, pipe(pipefd));
+ pid_t cpid = fork();
+ ASSERT_NE(-1, cpid);
+ if (cpid == 0) {
+ // Child process reads from the pipe and logs.
+ close(pipefd[1]);
+ std::string line;
+ char buf;
+ while (read(pipefd[0], &buf, sizeof(buf)) > 0) {
+ if (buf != '\n') {
+ line += buf;
+ } else {
+ ALOGI("%s", line.c_str());
+ line = "";
+ }
+ }
+ if (!line.empty()) ALOGI("%s", line.c_str());
+ close(pipefd[0]);
+ _exit(EXIT_SUCCESS);
+ } else {
+ // Parent does the dump and checks the status code.
+ close(pipefd[0]);
+ ASSERT_EQ(NO_ERROR, mManager->dump(pipefd[1]));
+ close(pipefd[1]);
+ wait(NULL); // Wait for the child to exit.
+ }
+}
+
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
audio_patch patch{};
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
@@ -166,29 +202,14 @@
}
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
- audio_patch patch{};
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
uid_t uid = 42;
const size_t patchCountBefore = mClient->getActivePatchesCount();
- patch.num_sources = 1;
- {
- auto& src = patch.sources[0];
- src.role = AUDIO_PORT_ROLE_SOURCE;
- src.type = AUDIO_PORT_TYPE_MIX;
- src.id = mManager->getConfig().getAvailableInputDevices()[0]->getId();
- // Note: these are the parameters of the output device.
- src.sample_rate = 44100;
- src.format = AUDIO_FORMAT_PCM_16_BIT;
- src.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
- }
- patch.num_sinks = 1;
- {
- auto& sink = patch.sinks[0];
- sink.role = AUDIO_PORT_ROLE_SINK;
- sink.type = AUDIO_PORT_TYPE_DEVICE;
- sink.id = mManager->getConfig().getDefaultOutputDevice()->getId();
- }
- ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(&patch, &handle, uid));
+ ASSERT_FALSE(mManager->getConfig().getAvailableInputDevices().isEmpty());
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(mManager->getConfig().getAvailableInputDevices()[0]).
+ addSink(mManager->getConfig().getDefaultOutputDevice());
+ ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
ASSERT_EQ(patchCountBefore + 1, mClient->getActivePatchesCount());
}
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+ audio_patch patch{};
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 1;
+ patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 0;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ // It's OK not to have sinks.
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).
+ addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+ audio_port_config device_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+ audio_port_config device_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+ audio_port_config mix_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+ audio_port_config mix_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+ const audio_patch patch1{}, patch2{};
+ // Invalid patches are not equal.
+ ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ audio_port_config sink_hw_av_sync = sink;
+ sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 0c738e7..683e84d 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -33,7 +33,10 @@
FrameProcessorBase(device),
mClient(client),
mLastFrameNumberOfFaces(0),
- mLast3AFrameNumber(-1) {
+ mLast3AFrameNumber(-1),
+ mLastAEFrameNumber(-1),
+ mLastAFrameNumber(-1),
+ mLastAWBFrameNumber(-1) {
sp<CameraDeviceBase> d = device.promote();
mSynthesize3ANotify = !(d->willNotify3A());
@@ -262,24 +265,73 @@
bool gotAllStates = true;
// TODO: Also use AE mode, AE trigger ID
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+ bool gotAFState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
&pendingState.afMode, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+ bool gotAWBState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
&pendingState.awbMode, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+ bool gotAEState = updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
&pendingState.aeState, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+ gotAFState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
&pendingState.afState, frameNumber, cameraId);
- gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+ gotAWBState &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
&pendingState.awbState, frameNumber, cameraId);
pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+ if (gotAEState && (frameNumber > mLastAEFrameNumber)) {
+ if (pendingState.aeState != m3aState.aeState ||
+ pendingState.aeTriggerId > m3aState.aeTriggerId) {
+ ALOGV("%s: Camera %d: AE state %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.aeState, pendingState.aeState);
+ client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
+
+ m3aState.aeState = pendingState.aeState;
+ m3aState.aeTriggerId = pendingState.aeTriggerId;
+ mLastAEFrameNumber = frameNumber;
+ }
+ }
+
+ if (gotAFState && (frameNumber > mLastAFrameNumber)) {
+ if (pendingState.afState != m3aState.afState ||
+ pendingState.afMode != m3aState.afMode ||
+ pendingState.afTriggerId != m3aState.afTriggerId) {
+ ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.afState, pendingState.afState,
+ m3aState.afMode, pendingState.afMode,
+ m3aState.afTriggerId, pendingState.afTriggerId);
+ client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
+
+ m3aState.afState = pendingState.afState;
+ m3aState.afMode = pendingState.afMode;
+ m3aState.afTriggerId = pendingState.afTriggerId;
+ mLastAFrameNumber = frameNumber;
+ }
+ }
+
+ if (gotAWBState && (frameNumber > mLastAWBFrameNumber)) {
+ if (pendingState.awbState != m3aState.awbState ||
+ pendingState.awbMode != m3aState.awbMode) {
+ ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.awbState, pendingState.awbState,
+ m3aState.awbMode, pendingState.awbMode);
+ client->notifyAutoWhitebalance(pendingState.awbState,
+ pendingState.aeTriggerId);
+
+ m3aState.awbMode = pendingState.awbMode;
+ m3aState.awbState = pendingState.awbState;
+ mLastAWBFrameNumber = frameNumber;
+ }
+ }
+
+ gotAllStates &= gotAEState & gotAFState & gotAWBState;
if (!gotAllStates) {
// If not all states are received, put the pending state to mPending3AStates.
if (index == NAME_NOT_FOUND) {
@@ -290,40 +342,10 @@
return NOT_ENOUGH_DATA;
}
- // Once all 3A states are received, notify the client about 3A changes.
- if (pendingState.aeState != m3aState.aeState ||
- pendingState.aeTriggerId > m3aState.aeTriggerId) {
- ALOGV("%s: Camera %d: AE state %d->%d",
- __FUNCTION__, cameraId,
- m3aState.aeState, pendingState.aeState);
- client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
- }
-
- if (pendingState.afState != m3aState.afState ||
- pendingState.afMode != m3aState.afMode ||
- pendingState.afTriggerId != m3aState.afTriggerId) {
- ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
- __FUNCTION__, cameraId,
- m3aState.afState, pendingState.afState,
- m3aState.afMode, pendingState.afMode,
- m3aState.afTriggerId, pendingState.afTriggerId);
- client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
- }
- if (pendingState.awbState != m3aState.awbState ||
- pendingState.awbMode != m3aState.awbMode) {
- ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
- __FUNCTION__, cameraId,
- m3aState.awbState, pendingState.awbState,
- m3aState.awbMode, pendingState.awbMode);
- client->notifyAutoWhitebalance(pendingState.awbState,
- pendingState.aeTriggerId);
- }
-
if (index != NAME_NOT_FOUND) {
mPending3AStates.removeItemsAt(index);
}
- m3aState = pendingState;
mLast3AFrameNumber = frameNumber;
return OK;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 62a4e91..8183c12 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -104,8 +104,7 @@
// Track most recent frame number for which 3A notifications were sent for.
// Used to filter against sending 3A notifications for the same frame
// several times.
- int32_t mLast3AFrameNumber;
-
+ int32_t mLast3AFrameNumber, mLastAEFrameNumber, mLastAFrameNumber, mLastAWBFrameNumber;
// Emit FaceDetection event to java if faces changed
void callbackFaceDetection(const sp<Camera2Client>& client,
const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 98d0534..baf051a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -661,7 +661,8 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
+ res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+ physicalCameraId);
if (!res.isOk())
return res;
@@ -889,6 +890,8 @@
const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
outputConfiguration.getGraphicBufferProducers();
+ String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+
auto producerCount = bufferProducers.size();
if (producerCount == 0) {
ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -942,7 +945,7 @@
OutputStreamInfo outInfo;
sp<Surface> surface;
res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
- newOutputsMap.valueAt(i));
+ newOutputsMap.valueAt(i), physicalCameraId);
if (!res.isOk())
return res;
@@ -1021,7 +1024,8 @@
binder::Status CameraDeviceClient::createSurfaceFromGbp(
OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8& physicalId) {
// bufferProducer must be non-null
if (gbp == nullptr) {
@@ -1098,7 +1102,7 @@
// Round dimensions to the nearest dimensions available for this format
if (flexibleConsumer && isPublicFormat(format) &&
!CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+ format, dataSpace, mDevice->info(physicalId), /*out*/&width, /*out*/&height)) {
String8 msg = String8::format("Camera %s: No supported stream configurations with "
"format %#x defined, failed to create output stream",
mCameraIdStr.string(), format);
@@ -1468,6 +1472,7 @@
const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
outputConfiguration.getGraphicBufferProducers();
+ String8 physicalId(outputConfiguration.getPhysicalCameraId());
if (bufferProducers.size() == 0) {
ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
@@ -1521,7 +1526,7 @@
sp<Surface> surface;
res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
- surface, bufferProducer);
+ surface, bufferProducer, physicalId);
if (!res.isOk())
return res;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5aaf5aa..c30561d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -258,7 +258,8 @@
// Create a Surface from an IGraphicBufferProducer. Returns error if
// IGraphicBufferProducer's property doesn't match with streamInfo
binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8& physicalCameraId);
// Utility method to insert the surface into SurfaceMap
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 0ba7403..98c1b5e 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -69,6 +69,10 @@
* The device's static characteristics metadata buffer
*/
virtual const CameraMetadata& info() const = 0;
+ /**
+ * The physical camera device's static characteristics metadata buffer
+ */
+ virtual const CameraMetadata& info(const String8& physicalId) const = 0;
struct PhysicalCameraSettings {
std::string cameraId;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 43f1a91..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -336,6 +336,7 @@
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
bool /*preexisting*/) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -458,6 +459,7 @@
}
status_t CameraProviderManager::removeProvider(const std::string& provider) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
std::unique_lock<std::mutex> lock(mInterfaceMutex);
std::vector<String8> removedDeviceIds;
status_t res = NAME_NOT_FOUND;
@@ -631,7 +633,12 @@
mUniqueCameraIds.insert(id);
if (isAPI1Compatible) {
- mUniqueAPI1CompatibleCameraIds.push_back(id);
+ // addDevice can be called more than once for the same camera id if HAL
+ // supports openLegacy.
+ if (std::find(mUniqueAPI1CompatibleCameraIds.begin(), mUniqueAPI1CompatibleCameraIds.end(),
+ id) == mUniqueAPI1CompatibleCameraIds.end()) {
+ mUniqueAPI1CompatibleCameraIds.push_back(id);
+ }
}
if (parsedId != nullptr) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b8b8b8c..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -246,6 +246,9 @@
wp<StatusListener> mListener;
ServiceInteractionProxy* mServiceProxy;
+ // mProviderLifecycleLock is locked during onRegistration and removeProvider
+ mutable std::mutex mProviderLifecycleLock;
+
static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
struct ProviderInfo :
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 543914e..eb66493 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -121,11 +121,25 @@
res = manager->getCameraCharacteristics(mId.string(), &mDeviceInfo);
if (res != OK) {
- SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+ SET_ERR_L("Could not retrieve camera characteristics: %s (%d)", strerror(-res), res);
session->close();
return res;
}
+ std::vector<std::string> physicalCameraIds;
+ bool isLogical = CameraProviderManager::isLogicalCamera(mDeviceInfo, &physicalCameraIds);
+ if (isLogical) {
+ for (auto& physicalId : physicalCameraIds) {
+ res = manager->getCameraCharacteristics(physicalId, &mPhysicalDeviceInfoMap[physicalId]);
+ if (res != OK) {
+ SET_ERR_L("Could not retrieve camera %s characteristics: %s (%d)",
+ physicalId.c_str(), strerror(-res), res);
+ session->close();
+ return res;
+ }
+ }
+ }
+
std::shared_ptr<RequestMetadataQueue> queue;
auto requestQueueRet = session->getCaptureRequestMetadataQueue(
[&queue](const auto& descriptor) {
@@ -719,7 +733,7 @@
return OK;
}
-const CameraMetadata& Camera3Device::info() const {
+const CameraMetadata& Camera3Device::info(const String8& physicalId) const {
ALOGVV("%s: E", __FUNCTION__);
if (CC_UNLIKELY(mStatus == STATUS_UNINITIALIZED ||
mStatus == STATUS_ERROR)) {
@@ -727,7 +741,22 @@
mStatus == STATUS_ERROR ?
"when in error state" : "before init");
}
- return mDeviceInfo;
+ if (physicalId.isEmpty()) {
+ return mDeviceInfo;
+ } else {
+ std::string id(physicalId.c_str());
+ if (mPhysicalDeviceInfoMap.find(id) != mPhysicalDeviceInfoMap.end()) {
+ return mPhysicalDeviceInfoMap.at(id);
+ } else {
+ ALOGE("%s: Invalid physical camera id %s", __FUNCTION__, physicalId.c_str());
+ return mDeviceInfo;
+ }
+ }
+}
+
+const CameraMetadata& Camera3Device::info() const {
+ String8 emptyId;
+ return info(emptyId);
}
status_t Camera3Device::checkStatusOkToCaptureLocked() {
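The info() hunks above keep a per-physical-camera characteristics map and fall back to the logical device's own metadata when the physical id is empty or unknown. A standalone sketch of that lookup, using plain stand-in types rather than CameraMetadata:

    // Standalone sketch (assumed types) of the per-physical-camera lookup above.
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    using Metadata = std::string;  // stand-in for CameraMetadata

    static const Metadata& infoFor(
            const Metadata& logical,
            const std::unordered_map<std::string, Metadata>& physical,
            const std::string& physicalId) {
        if (physicalId.empty()) return logical;
        auto it = physical.find(physicalId);
        if (it != physical.end()) return it->second;
        std::fprintf(stderr, "Invalid physical camera id %s\n", physicalId.c_str());
        return logical;  // same fallback as the patch: return the logical device info
    }

    int main() {
        Metadata logical = "logical-0";
        std::unordered_map<std::string, Metadata> physical =
                {{"2", "physical-2"}, {"3", "physical-3"}};
        std::printf("%s\n", infoFor(logical, physical, "2").c_str());
        std::printf("%s\n", infoFor(logical, physical, "").c_str());
        return 0;
    }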
@@ -4710,6 +4739,7 @@
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
+ bool batchedRequest = mNextRequests[0].captureRequest->mBatchSize > 1;
for (size_t i = 0; i < mNextRequests.size(); i++) {
auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
@@ -4733,7 +4763,10 @@
mPrevTriggers = triggerCount;
// If the request is the same as last, or we had triggers last time
- bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
+ bool newRequest = (mPrevRequest != captureRequest || triggersMixedIn) &&
+ // Request settings are all the same within one batch, so only treat the first
+ // request in a batch as new
+ !(batchedRequest && i > 0);
if (newRequest) {
/**
* HAL workaround:
@@ -4882,7 +4915,7 @@
// preview), and the current request is not the last one in the batch,
// do not send callback to the app.
bool hasCallback = true;
- if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+ if (batchedRequest && i != mNextRequests.size()-1) {
hasCallback = false;
}
res = parent->registerInFlight(halRequest->frame_number,
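The Camera3Device hunks above compute batchedRequest once per prepared batch and use it both to treat only the first request in a batch as new and to report a callback only on the last one. A standalone sketch of those two predicates (the settings-changed/trigger check is folded out for brevity):

    // Standalone sketch of the batching rule above: within a batch of identical
    // settings, only index 0 is a "new" request and only the last index reports
    // a callback to the app.
    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t batchSize = 4;                 // e.g. a 4-frame HFR batch
        const bool batchedRequest = batchSize > 1;
        for (size_t i = 0; i < batchSize; ++i) {
            const bool newRequest  = !(batchedRequest && i > 0);
            const bool hasCallback = !(batchedRequest && i != batchSize - 1);
            std::printf("i=%zu new=%d callback=%d\n",
                        i, (int)newRequest, (int)hasCallback);
        }
        return 0;
    }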
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d8fe19f..ef3cbc4 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -101,6 +101,7 @@
status_t disconnect() override;
status_t dump(int fd, const Vector<String16> &args) override;
const CameraMetadata& info() const override;
+ const CameraMetadata& info(const String8& physicalId) const override;
// Capture and setStreamingRequest will configure streams if currently in
// idle state
@@ -379,6 +380,7 @@
sp<HalInterface> mInterface;
CameraMetadata mDeviceInfo;
+ std::unordered_map<std::string, CameraMetadata> mPhysicalDeviceInfoMap;
CameraMetadata mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 6ec8895..edf4dab 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -55,4 +55,8 @@
getdents64: 1
getrandom: 1
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
@include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index bbbe552..6e6b276 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -55,4 +55,8 @@
getpid: 1
gettid: 1
+# Used by UBSan diagnostic messages
+readlink: 1
+open: 1
+
@include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 37d6cc9..73c9535 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -18,6 +18,7 @@
LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaextractor.policy
LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaextractor.policy
LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaextractor.policy
# extractor libraries
LOCAL_REQUIRED_MODULES += \
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
old mode 100755
new mode 100644
index 63c7780..6d9ed6f
--- a/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86_64.policy
@@ -21,6 +21,7 @@
getuid: 1
setpriority: 1
sigaltstack: 1
+fstatfs: 1
newfstatat: 1
restart_syscall: 1
exit: 1
@@ -30,28 +31,21 @@
sched_setscheduler: 1
getrlimit: 1
nanosleep: 1
+getrandom: 1
+
+# for dynamically loading extractors
+getdents64: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
# for FileSource
readlinkat: 1
-# for attaching to debuggerd on process crash
-tgkill: 1
-socket: arg0 == 1
-connect: 1
-fcntl: 1
-rt_sigprocmask: 1
-rt_sigaction: 1
-rt_tgsigqueueinfo: 1
-geteuid: 1
-getgid: 1
-getegid: 1
-getgroups: 1
-getdents64: 1
-pipe2: 1
-ppoll: 1
-
# Required by AddressSanitizer
gettid: 1
sched_yield: 1
getpid: 1
gettid: 1
+
+@include /system/etc/seccomp_policy/crash_dump.x86_64.policy
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index 29e6dfc..ca96f62 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -9,7 +9,9 @@
shared_libs: [
"libaudioutils",
"libbinder",
+ "libcutils",
"liblog",
+ "libmediautils",
"libnbaio",
"libnblog",
"libutils",
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 1be5544..e58dff7 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -21,7 +21,7 @@
#include <utils/Log.h>
#include <binder/PermissionCache.h>
#include <media/nblog/NBLog.h>
-#include <private/android_filesystem_config.h>
+#include <mediautils/ServiceUtilities.h>
#include "MediaLogService.h"
namespace android {
@@ -53,7 +53,7 @@
void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
{
- if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
+ if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0 ||
size < kMinSize || size > kMaxSize || name == NULL ||
shared->size() < NBLog::Timeline::sharedSize(size)) {
return;
@@ -67,7 +67,7 @@
void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
{
- if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0) {
+ if (!isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid()) || shared == 0) {
return;
}
Mutex::Autolock _l(mLock);
@@ -95,10 +95,8 @@
status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
{
- // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
- static const String16 sDump("android.permission.DUMP");
- if (!(IPCThreadState::self()->getCallingUid() == AID_AUDIOSERVER ||
- PermissionCache::checkCallingPermission(sDump))) {
+ if (!(isAudioServerOrMediaServerUid(IPCThreadState::self()->getCallingUid())
+ || dumpAllowed())) {
dprintf(fd, "Permission Denial: can't dump media.log from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 6a72e5b..94440b1 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -24,6 +24,7 @@
#include <aaudio/AAudio.h>
#include <mediautils/SchedulingPolicyService.h>
+#include <mediautils/ServiceUtilities.h>
#include <utils/String16.h>
#include "binding/AAudioServiceMessage.h"
@@ -33,7 +34,6 @@
#include "AAudioServiceStreamMMAP.h"
#include "AAudioServiceStreamShared.h"
#include "binding/IAAudioService.h"
-#include "ServiceUtilities.h"
using namespace android;
using namespace aaudio;
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
index 584b2ef..3d5f140 100644
--- a/services/oboeservice/Android.mk
+++ b/services/oboeservice/Android.mk
@@ -53,7 +53,6 @@
libbinder \
libcutils \
libmediautils \
- libserviceutility \
libutils \
liblog
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ad3666e..3c7d29d 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -34,8 +34,7 @@
libhardware \
libsoundtrigger \
libaudioclient \
- libserviceutility
-
+ libmediautils \
ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
# libhardware configuration
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index a7d6e83..eb9cd1d 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -27,13 +27,13 @@
#include <cutils/properties.h>
#include <hardware/hardware.h>
#include <media/AudioSystem.h>
+#include <mediautils/ServiceUtilities.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <system/sound_trigger.h>
-#include <ServiceUtilities.h>
#include "SoundTriggerHwService.h"
#ifdef SOUND_TRIGGER_USE_STUB_MODULE
@@ -562,10 +562,7 @@
if (mHalInterface == 0) {
return NO_INIT;
}
- if (modelMemory == 0 || modelMemory->pointer() == NULL) {
- ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
- return BAD_VALUE;
- }
+
struct sound_trigger_sound_model *sound_model =
(struct sound_trigger_sound_model *)modelMemory->pointer();
@@ -659,11 +656,6 @@
if (mHalInterface == 0) {
return NO_INIT;
}
- if (dataMemory == 0 || dataMemory->pointer() == NULL) {
- ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
- return BAD_VALUE;
-
- }
struct sound_trigger_recognition_config *config =
(struct sound_trigger_recognition_config *)dataMemory->pointer();
@@ -966,6 +958,9 @@
IPCThreadState::self()->getCallingUid())) {
return PERMISSION_DENIED;
}
+ if (checkIMemory(modelMemory) != NO_ERROR) {
+ return BAD_VALUE;
+ }
sp<Module> module = mModule.promote();
if (module == 0) {
@@ -997,6 +992,9 @@
IPCThreadState::self()->getCallingUid())) {
return PERMISSION_DENIED;
}
+ if (checkIMemory(dataMemory) != NO_ERROR) {
+ return BAD_VALUE;
+ }
sp<Module> module = mModule.promote();
if (module == 0) {