Merge "Rearrange variables in PerformanceAnalysis::reportPerformance in preparation for moving to separate file later."
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 85aab57..97e160e 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -647,7 +647,7 @@
MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
- MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
+ MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, MEDIA_MIMETYPE_AUDIO_AC4
};
const char *codecType = queryDecoders? "decoder" : "encoder";
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
new file mode 100644
index 0000000..167d474
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AC4Parser"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include "AC4Parser.h"
+
+#define BOOLSTR(a) ((a)?"true":"false")
+#define BYTE_ALIGN mBitReader.skipBits(mBitReader.numBitsLeft() % 8)
+#define CHECK_BITS_LEFT(n) if (mBitReader.numBitsLeft() < n) {return false;}
+
+namespace android {
+
+AC4Parser::AC4Parser() { // Trivial base-class constructor; mPresentations starts empty.
+}
+
+AC4DSIParser::AC4DSIParser(ABitReader &br)
+ : mBitReader(br){ // Holds a reference: the caller's ABitReader must outlive this parser.
+
+ mDSISize = mBitReader.numBitsLeft(); // Snapshot of the total DSI size in bits; parse() uses it to compute bytes consumed so far.
+}
+
+// ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+static const char *ChannelModes[] = { // Human-readable names indexed by the 5-bit channel_mode field; only values 0-15 are named, higher values are "reserved".
+ "mono",
+ "stereo",
+ "3.0",
+ "5.0",
+ "5.1",
+ "7.0 (3/4/0)",
+ "7.1 (3/4/0.1)",
+ "7.0 (5/2/0)",
+ "7.1 (5/2/0.1)",
+ "7.0 (3/2/2)",
+ "7.1 (3/2/2.1)",
+ "7.0.4",
+ "7.1.4",
+ "9.0.4",
+ "9.1.4",
+ "22.2"
+};
+
+static const char* ContentClassifier[] = { // Indexed by the 3-bit content_classifier field (TS 103 190-1 4.3.3.8.1); 8 entries cover all possible values.
+ "Complete Main",
+ "Music and Effects",
+ "Visually Impaired",
+ "Hearing Impaired",
+ "Dialog",
+ "Commentary",
+ "Emergency",
+ "Voice Over"
+};
+
+bool AC4DSIParser::parseLanguageTag(uint32_t presentationID, uint32_t substreamID){ // Reads a variable-length language tag and stores it on presentation 'presentationID'; 'substreamID' is used only for logging. Returns false if bits run out or the length is out of range.
+ CHECK_BITS_LEFT(6);
+ uint32_t n_language_tag_bytes = mBitReader.getBits(6);
+ if (n_language_tag_bytes < 2 || n_language_tag_bytes >= 42) { // range per TS 103 190-1 4.3.3.8.7; also keeps every write below inside the 42-byte buffer (max index 41).
+ return false;
+ }
+ CHECK_BITS_LEFT(n_language_tag_bytes * 8);
+ char language_tag_bytes[42]; // TS 103 190 part 1 4.3.3.8.7
+ for (uint32_t i = 0; i < n_language_tag_bytes; i++) {
+ language_tag_bytes[i] = (char)mBitReader.getBits(8);
+ }
+ language_tag_bytes[n_language_tag_bytes] = 0; // NUL-terminate for the %s log below (index <= 41, in bounds).
+ ALOGV("%u.%u: language_tag = %s\n", presentationID, substreamID, language_tag_bytes);
+
+ std::string language(language_tag_bytes, n_language_tag_bytes);
+ mPresentations[presentationID].mLanguage = language; // operator[] default-creates the presentation entry if not seen yet.
+
+ return true;
+}
+
+// TS 103 190-1 v1.2.1 E.5 and TS 103 190-2 v1.1.1 E.9
+bool AC4DSIParser::parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID){
+ CHECK_BITS_LEFT(5);
+ uint32_t channel_mode = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: channel_mode = %u (%s)\n", presentationID, substreamID, channel_mode,
+ channel_mode < NELEM(ChannelModes) ? ChannelModes[channel_mode] : "reserved");
+ ALOGV("%u.%u: dsi_sf_multiplier = %u\n", presentationID,
+ substreamID, dsi_sf_multiplier);
+ ALOGV("%u.%u: b_substream_bitrate_indicator = %s\n", presentationID,
+ substreamID, BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u: substream_bitrate_indicator = %u\n", presentationID, substreamID,
+ substream_bitrate_indicator);
+ }
+ if (channel_mode >= 7 && channel_mode <= 10) {
+ CHECK_BITS_LEFT(1);
+ uint32_t add_ch_base = mBitReader.getBits(1);
+ ALOGV("%u.%u: add_ch_base = %u\n", presentationID, substreamID, add_ch_base);
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, substreamID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3);
+ ALOGV("%u.%u: content_classifier = %u (%s)\n", presentationID, substreamID,
+ content_classifier, ContentClassifier[content_classifier]);
+
+ // For streams based on TS 103 190 part 1 the presentation level channel_mode doesn't
+ // exist and so we use the channel_mode from either the CM or M&E substream
+ // (they are mutually exclusive)
+ if (mPresentations[presentationID].mChannelMode == -1 &&
+ (content_classifier == 0 || content_classifier == 1)) {
+ mPresentations[presentationID].mChannelMode = channel_mode;
+ }
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, substreamID,
+ BOOLSTR(b_language_indicator));
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, substreamID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+// ETSI TS 103 190-2 v1.1.1 section E.11
+bool AC4DSIParser::parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID) // Parses one ac4_substream_group_dsi (v1 streams); records content classifier / language on the presentation. Returns false on bit-exhaustion.
+{
+ CHECK_BITS_LEFT(1);
+ bool b_substreams_present = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(1);
+ bool b_channel_coded = (mBitReader.getBits(1) == 1);
+ CHECK_BITS_LEFT(8);
+ uint32_t n_substreams = mBitReader.getBits(8);
+ ALOGV("%u.%u: b_substreams_present = %s\n", presentationID, groupID,
+ BOOLSTR(b_substreams_present));
+ ALOGV("%u.%u: b_hsf_ext = %s\n", presentationID, groupID, BOOLSTR(b_hsf_ext));
+ ALOGV("%u.%u: b_channel_coded = %s\n", presentationID, groupID, BOOLSTR(b_channel_coded));
+ ALOGV("%u.%u: n_substreams = %u\n", presentationID, groupID, n_substreams);
+
+ for (uint32_t i = 0; i < n_substreams; i++) { // one dsi per substream in this group
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_sf_multiplier = mBitReader.getBits(2);
+ CHECK_BITS_LEFT(1);
+ bool b_substream_bitrate_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: dsi_sf_multiplier = %u\n", presentationID, groupID, i, dsi_sf_multiplier);
+ ALOGV("%u.%u.%u: b_substream_bitrate_indicator = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_substream_bitrate_indicator));
+
+ if (b_substream_bitrate_indicator) {
+ CHECK_BITS_LEFT(5);
+ uint32_t substream_bitrate_indicator = mBitReader.getBits(5);
+ ALOGV("%u.%u.%u: substream_bitrate_indicator = %u\n", presentationID, groupID, i,
+ substream_bitrate_indicator);
+ }
+ if (b_channel_coded) {
+ CHECK_BITS_LEFT(24);
+ uint32_t dsi_substream_channel_mask = mBitReader.getBits(24);
+ ALOGV("%u.%u.%u: dsi_substream_channel_mask = 0x%06x\n", presentationID, groupID, i,
+ dsi_substream_channel_mask);
+ } else { // object-coded substream: A-JOC or plain objects
+ CHECK_BITS_LEFT(1);
+ bool b_ajoc = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_ajoc = %s\n", presentationID, groupID, i, BOOLSTR(b_ajoc));
+ if (b_ajoc) {
+ CHECK_BITS_LEFT(1);
+ bool b_static_dmx = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u.%u: b_static_dmx = %s\n", presentationID, groupID, i,
+ BOOLSTR(b_static_dmx));
+ if (!b_static_dmx) {
+ CHECK_BITS_LEFT(4);
+ uint32_t n_dmx_objects_minus1 = mBitReader.getBits(4);
+ ALOGV("%u.%u.%u: n_dmx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_dmx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(6);
+ uint32_t n_umx_objects_minus1 = mBitReader.getBits(6);
+ ALOGV("%u.%u.%u: n_umx_objects_minus1 = %u\n", presentationID, groupID, i,
+ n_umx_objects_minus1);
+ }
+ CHECK_BITS_LEFT(4);
+ mBitReader.skipBits(4); // objects_assignment_mask
+ }
+ }
+
+ CHECK_BITS_LEFT(1);
+ bool b_content_type = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_content_type = %s\n", presentationID, groupID, BOOLSTR(b_content_type));
+ if (b_content_type) {
+ CHECK_BITS_LEFT(3);
+ uint32_t content_classifier = mBitReader.getBits(3); // 3-bit value 0..7: always in range for the 8-entry table
+ ALOGV("%u.%u: content_classifier = %s (%u)\n", presentationID, groupID,
+ ContentClassifier[content_classifier], content_classifier);
+
+ mPresentations[presentationID].mContentClassifier = content_classifier;
+
+ CHECK_BITS_LEFT(1);
+ bool b_language_indicator = (mBitReader.getBits(1) == 1);
+ ALOGV("%u.%u: b_language_indicator = %s\n", presentationID, groupID,
+ BOOLSTR(b_language_indicator));
+
+ if (b_language_indicator) {
+ if (!parseLanguageTag(presentationID, groupID)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool AC4DSIParser::parseBitrateDsi() { // Skips the fixed-size ac4_bitrate_dsi (66 bits); only fails if bits run out.
+ CHECK_BITS_LEFT(2 + 32 + 32);
+ mBitReader.skipBits(2); // bit_rate_mode
+ mBitReader.skipBits(32); // bit_rate
+ mBitReader.skipBits(32); // bit_rate_precision
+
+ return true;
+}
+
+// TS 103 190-1 section E.4 (ac4_dsi) and TS 103 190-2 section E.6 (ac4_dsi_v1)
+bool AC4DSIParser::parse() { // Parses ac4_dsi (v0) / ac4_dsi_v1 per TS 103 190-1 E.4 / 103 190-2 E.6, filling mPresentations. Returns false on truncated/unsupported/corrupt data.
+ CHECK_BITS_LEFT(3);
+ uint32_t ac4_dsi_version = mBitReader.getBits(3);
+ if (ac4_dsi_version > 1) {
+ ALOGE("error while parsing ac-4 dsi: only versions 0 and 1 are supported");
+ return false;
+ }
+
+ CHECK_BITS_LEFT(7 + 1 + 4 + 9);
+ uint32_t bitstream_version = mBitReader.getBits(7);
+ mBitReader.skipBits(1); // fs_index
+ mBitReader.skipBits(4); // frame_rate_index
+ uint32_t n_presentations = mBitReader.getBits(9);
+
+ int32_t short_program_id = -1;
+ if (bitstream_version > 1) { // program identification only exists from bitstream v2 on
+ if (ac4_dsi_version == 0){
+ ALOGE("invalid ac4 dsi");
+ return false;
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_program_id = (mBitReader.getBits(1) == 1);
+ if (b_program_id) {
+ CHECK_BITS_LEFT(16 + 1);
+ short_program_id = mBitReader.getBits(16);
+ bool b_uuid = (mBitReader.getBits(1) == 1);
+ if (b_uuid) {
+ const uint32_t kAC4UUIDSizeInBytes = 16;
+ char program_uuid[kAC4UUIDSizeInBytes];
+ CHECK_BITS_LEFT(kAC4UUIDSizeInBytes * 8);
+ for (uint32_t i = 0; i < kAC4UUIDSizeInBytes; i++) {
+ program_uuid[i] = (char)(mBitReader.getBits(8));
+ }
+ ALOGV("UUID = %.16s", program_uuid); // precision bounds the read: buffer is not NUL-terminated
+ }
+ }
+ }
+
+ if (ac4_dsi_version == 1) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ BYTE_ALIGN;
+ }
+
+ for (uint32_t presentation = 0; presentation < n_presentations; presentation++) {
+ mPresentations[presentation].mProgramID = short_program_id;
+ // known as b_single_substream in ac4_dsi_version 0
+ bool b_single_substream_group = false;
+ uint32_t presentation_config = 0, presentation_version = 0;
+ uint32_t pres_bytes = 0;
+
+ if (ac4_dsi_version == 0) {
+ CHECK_BITS_LEFT(1 + 5 + 5);
+ b_single_substream_group = (mBitReader.getBits(1) == 1);
+ presentation_config = mBitReader.getBits(5);
+ presentation_version = mBitReader.getBits(5);
+ } else if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(8 + 8);
+ presentation_version = mBitReader.getBits(8);
+ pres_bytes = mBitReader.getBits(8); // declared size of this presentation's dsi; 0xff means an extension word follows
+ if (pres_bytes == 0xff) {
+ CHECK_BITS_LEFT(16);
+ pres_bytes += mBitReader.getBits(16);
+ }
+ ALOGV("%u: pres_bytes = %u\n", presentation, pres_bytes);
+ if (presentation_version > 1) { // unknown future version: skip the whole presentation dsi
+ CHECK_BITS_LEFT(pres_bytes * 8);
+ mBitReader.skipBits(pres_bytes * 8);
+ continue;
+ }
+ // ac4_presentation_v0_dsi() and ac4_presentation_v1_dsi() both
+ // start with a presentation_config of 5 bits
+ CHECK_BITS_LEFT(5);
+ presentation_config = mBitReader.getBits(5);
+ b_single_substream_group = (presentation_config == 0x1f);
+ }
+
+ static const char *PresentationConfig[] = {
+ "Music&Effects + Dialog",
+ "Main + DE",
+ "Main + Associate",
+ "Music&Effects + Dialog + Associate",
+ "Main + DE + Associate",
+ "Arbitrary substream groups",
+ "EMDF only"
+ };
+ ALOGV("%u: b_single_substream/group = %s\n", presentation,
+ BOOLSTR(b_single_substream_group));
+ ALOGV("%u: presentation_version = %u\n", presentation, presentation_version);
+ ALOGV("%u: presentation_config = %u (%s)\n", presentation, presentation_config,
+ (presentation_config >= NELEM(PresentationConfig) ?
+ "reserved" : PresentationConfig[presentation_config]));
+
+ /* record a marker, less the size of the presentation_config */
+ uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8; // bytes consumed so far; paired with 'end' below to honor pres_bytes
+
+ bool b_add_emdf_substreams = false;
+ if (!b_single_substream_group && presentation_config == 6) { // EMDF-only presentation carries no audio substreams
+ b_add_emdf_substreams = true;
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation, BOOLSTR(b_add_emdf_substreams));
+ } else {
+ CHECK_BITS_LEFT(3 + 1);
+ uint32_t mdcompat = mBitReader.getBits(3);
+ ALOGV("%u: mdcompat = %d\n", presentation, mdcompat);
+
+ bool b_presentation_group_index = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_group_index = %s\n", presentation,
+ BOOLSTR(b_presentation_group_index));
+ if (b_presentation_group_index) {
+ CHECK_BITS_LEFT(5);
+ mPresentations[presentation].mGroupIndex = mBitReader.getBits(5);
+ ALOGV("%u: presentation_group_index = %d\n", presentation,
+ mPresentations[presentation].mGroupIndex);
+ }
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_multiply_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_multiply_info = %d\n", presentation,
+ dsi_frame_rate_multiply_info);
+ if (ac4_dsi_version == 1 && presentation_version == 1) {
+ CHECK_BITS_LEFT(2);
+ uint32_t dsi_frame_rate_fraction_info = mBitReader.getBits(2);
+ ALOGV("%u: dsi_frame_rate_fraction_info = %d\n", presentation,
+ dsi_frame_rate_fraction_info);
+ }
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t presentation_emdf_version = mBitReader.getBits(5);
+ uint32_t presentation_key_id = mBitReader.getBits(10);
+ ALOGV("%u: presentation_emdf_version = %d\n", presentation, presentation_emdf_version);
+ ALOGV("%u: presentation_key_id = %d\n", presentation, presentation_key_id);
+
+ if (ac4_dsi_version == 1) {
+ bool b_presentation_channel_coded = false;
+ if (presentation_version == 0) { // v0 presentations are always channel coded
+ b_presentation_channel_coded = true;
+ } else {
+ CHECK_BITS_LEFT(1);
+ b_presentation_channel_coded = (mBitReader.getBits(1) == 1);
+ }
+ ALOGV("%u: b_presentation_channel_coded = %s\n", presentation,
+ BOOLSTR(b_presentation_channel_coded));
+ if (b_presentation_channel_coded) {
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(5);
+ uint32_t dsi_presentation_ch_mode = mBitReader.getBits(5);
+ mPresentations[presentation].mChannelMode = dsi_presentation_ch_mode;
+ ALOGV("%u: dsi_presentation_ch_mode = %d (%s)\n", presentation,
+ dsi_presentation_ch_mode,
+ dsi_presentation_ch_mode < NELEM(ChannelModes) ?
+ ChannelModes[dsi_presentation_ch_mode] : "reserved");
+
+ if (dsi_presentation_ch_mode >= 11 && dsi_presentation_ch_mode <= 14) { // 7.x.4 / 9.x.4 immersive layouts carry extra speaker info
+ CHECK_BITS_LEFT(1 + 2);
+ uint32_t pres_b_4_back_channels_present = mBitReader.getBits(1);
+ uint32_t pres_top_channel_pairs = mBitReader.getBits(2);
+ ALOGV("%u: pres_b_4_back_channels_present = %s\n", presentation,
+ BOOLSTR(pres_b_4_back_channels_present));
+ ALOGV("%u: pres_top_channel_pairs = %d\n", presentation,
+ pres_top_channel_pairs);
+ }
+ }
+ // presentation_channel_mask in ac4_presentation_v0_dsi()
+ CHECK_BITS_LEFT(24);
+ uint32_t presentation_channel_mask_v1 = mBitReader.getBits(24);
+ ALOGV("%u: presentation_channel_mask_v1 = 0x%06x\n", presentation,
+ presentation_channel_mask_v1);
+ }
+ if (presentation_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_differs = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_core_differs = %s\n", presentation,
+ BOOLSTR(b_presentation_core_differs));
+ if (b_presentation_core_differs) {
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_core_channel_coded = (mBitReader.getBits(1) == 1);
+ if (b_presentation_core_channel_coded) {
+ CHECK_BITS_LEFT(2);
+ mBitReader.skipBits(2); // dsi_presentation_channel_mode_core
+ }
+ }
+ CHECK_BITS_LEFT(1);
+ bool b_presentation_filter = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_presentation_filter = %s\n", presentation,
+ BOOLSTR(b_presentation_filter));
+ if (b_presentation_filter) {
+ CHECK_BITS_LEFT(1 + 8);
+ bool b_enable_presentation = (mBitReader.getBits(1) == 1);
+ if (!b_enable_presentation) {
+ mPresentations[presentation].mEnabled = false; // filtered-out presentation: keep metadata but mark disabled
+ }
+ ALOGV("%u: b_enable_presentation = %s\n", presentation,
+ BOOLSTR(b_enable_presentation));
+ uint32_t n_filter_bytes = mBitReader.getBits(8);
+ CHECK_BITS_LEFT(n_filter_bytes * 8);
+ for (uint32_t i = 0; i < n_filter_bytes; i++) {
+ mBitReader.skipBits(8); // filter_data
+ }
+ }
+ }
+ } /* ac4_dsi_version == 1 */
+
+ if (b_single_substream_group) {
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ }
+ } else {
+ if (ac4_dsi_version == 1) {
+ CHECK_BITS_LEFT(1);
+ bool b_multi_pid = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_multi_pid = %s\n", presentation, BOOLSTR(b_multi_pid));
+ } else {
+ CHECK_BITS_LEFT(1);
+ bool b_hsf_ext = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_hsf_ext = %s\n", presentation, BOOLSTR(b_hsf_ext));
+ }
+ switch (presentation_config) { // substream (group) count is implied by the configuration
+ case 0:
+ case 1:
+ case 2:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ }
+ break;
+ case 3:
+ case 4:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamDSI(presentation, 2)) {
+ return false;
+ }
+ } else {
+ if (!parseSubstreamGroupDSI(presentation, 0)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 1)) {
+ return false;
+ }
+ if (!parseSubstreamGroupDSI(presentation, 2)) {
+ return false;
+ }
+ }
+ break;
+ case 5:
+ if (presentation_version == 0) {
+ if (!parseSubstreamDSI(presentation, 0)) {
+ return false;
+ }
+ } else {
+ CHECK_BITS_LEFT(3);
+ uint32_t n_substream_groups_minus2 = mBitReader.getBits(3);
+ ALOGV("%u: n_substream_groups_minus2 = %d\n", presentation,
+ n_substream_groups_minus2);
+ for (uint32_t sg = 0; sg < n_substream_groups_minus2 + 2; sg++) {
+ if (!parseSubstreamGroupDSI(presentation, sg)) {
+ return false;
+ }
+ }
+ }
+ break;
+ default: // reserved configs carry an explicit byte count to skip
+ CHECK_BITS_LEFT(7);
+ uint32_t n_skip_bytes = mBitReader.getBits(7);
+ CHECK_BITS_LEFT(n_skip_bytes * 8)
+ for (uint32_t j = 0; j < n_skip_bytes; j++) {
+ mBitReader.getBits(8);
+ }
+ break;
+ }
+ CHECK_BITS_LEFT(1 + 1);
+ bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+ mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+ b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+ BOOLSTR(b_add_emdf_substreams));
+ }
+ }
+ if (b_add_emdf_substreams) {
+ CHECK_BITS_LEFT(7);
+ uint32_t n_add_emdf_substreams = mBitReader.getBits(7);
+ for (uint32_t j = 0; j < n_add_emdf_substreams; j++) {
+ CHECK_BITS_LEFT(5 + 10);
+ uint32_t substream_emdf_version = mBitReader.getBits(5);
+ uint32_t substream_key_id = mBitReader.getBits(10);
+ ALOGV("%u: emdf_substream[%d]: version=%d, key_id=%d\n", presentation, j,
+ substream_emdf_version, substream_key_id);
+ }
+ }
+
+ bool b_presentation_bitrate_info = false;
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ b_presentation_bitrate_info = (mBitReader.getBits(1) == 1);
+ }
+
+ ALOGV("b_presentation_bitrate_info = %s\n", BOOLSTR(b_presentation_bitrate_info));
+ if (b_presentation_bitrate_info) {
+ if (!parseBitrateDsi()) {
+ return false;
+ }
+ }
+
+ if (presentation_version > 0) {
+ CHECK_BITS_LEFT(1);
+ bool b_alternative = (mBitReader.getBits(1) == 1);
+ ALOGV("b_alternative = %s\n", BOOLSTR(b_alternative));
+ if (b_alternative) {
+ BYTE_ALIGN;
+ CHECK_BITS_LEFT(16);
+ uint32_t name_len = mBitReader.getBits(16);
+ // Build the presentation name into std::string directly (the previous new[] buffer was never freed).
+ CHECK_BITS_LEFT(name_len * 8);
+ std::string description;
+ description.reserve(name_len);
+ for (uint32_t i = 0; i < name_len; i++) {
+ description.push_back((char)(mBitReader.getBits(8)));
+ }
+ mPresentations[presentation].mDescription = description;
+ CHECK_BITS_LEFT(5);
+ uint32_t n_targets = mBitReader.getBits(5);
+ CHECK_BITS_LEFT(n_targets * (3 + 8));
+ for (uint32_t i = 0; i < n_targets; i++){
+ mBitReader.skipBits(3); // target_md_compat
+ mBitReader.skipBits(8); // target_device_category
+ }
+ }
+ }
+
+ BYTE_ALIGN;
+
+ if (ac4_dsi_version == 1) {
+ uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
+ if (mBitReader.numBitsLeft() % 8 != 0) {
+ end += 1; // round partially-consumed byte up
+ }
+ uint64_t presentation_bytes = end - start;
+ if (presentation_bytes > pres_bytes) return false; // corrupt DSI: consumed more than declared (unsigned subtraction below would underflow)
+ uint64_t skip_bytes = pres_bytes - presentation_bytes;
+ ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
+ CHECK_BITS_LEFT(skip_bytes * 8);
+ mBitReader.skipBits(skip_bytes * 8);
+ }
+
+ // we should know this or something is probably wrong
+ // with the bitstream (or we don't support it)
+ if (mPresentations[presentation].mChannelMode == -1){
+ ALOGE("could not determine channel mode of presentation %u", presentation);
+ return false;
+ }
+ } /* each presentation */
+
+ return true;
+}
+
+};
diff --git a/media/extractors/mp4/AC4Parser.h b/media/extractors/mp4/AC4Parser.h
new file mode 100644
index 0000000..73b6e31
--- /dev/null
+++ b/media/extractors/mp4/AC4Parser.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AC4_PARSER_H_
+#define AC4_PARSER_H_
+
+#include <cstdint>
+#include <map>
+#include <string>
+
+#include <media/stagefright/foundation/ABitReader.h>
+
+namespace android {
+
+class AC4Parser { // Abstract base for AC-4 metadata parsers; owns the parsed presentation table.
+public:
+ AC4Parser();
+ virtual ~AC4Parser() { }
+
+ virtual bool parse() = 0; // Returns true on success; implementations fill mPresentations.
+
+ struct AC4Presentation { // Metadata for one AC-4 presentation, keyed by presentation index.
+ int32_t mChannelMode = -1; // -1 = unknown until a substream/presentation dsi sets it
+ int32_t mProgramID = -1;
+ int32_t mGroupIndex = -1;
+
+ // TS 103 190-1 v1.2.1 4.3.3.8.1
+ enum ContentClassifiers {
+ kCompleteMain,
+ kMusicAndEffects,
+ kVisuallyImpaired,
+ kHearingImpaired,
+ kDialog,
+ kCommentary,
+ kEmergency,
+ kVoiceOver
+ };
+
+ uint32_t mContentClassifier = kCompleteMain;
+
+ // ETSI TS 103 190-2 V1.1.1 (2015-09) Table 79: channel_mode
+ enum InputChannelMode {
+ kChannelMode_Mono,
+ kChannelMode_Stereo,
+ kChannelMode_3_0,
+ kChannelMode_5_0,
+ kChannelMode_5_1,
+ kChannelMode_7_0_34,
+ kChannelMode_7_1_34,
+ kChannelMode_7_0_52,
+ kChannelMode_7_1_52,
+ kChannelMode_7_0_322,
+ kChannelMode_7_1_322,
+ kChannelMode_7_0_4,
+ kChannelMode_7_1_4,
+ kChannelMode_9_0_4,
+ kChannelMode_9_1_4,
+ kChannelMode_22_2,
+ kChannelMode_Reserved,
+ };
+
+ bool mHasDialogEnhancements = false;
+ bool mPreVirtualized = false;
+ bool mEnabled = true; // cleared when a presentation filter disables this presentation
+
+ std::string mLanguage; // raw language tag bytes as read from the bitstream
+ std::string mDescription;
+ };
+ typedef std::map<uint32_t, AC4Presentation> AC4Presentations;
+
+ const AC4Presentations& getPresentations() const { return mPresentations; }
+
+protected:
+ AC4Presentations mPresentations;
+};
+
+class AC4DSIParser: public AC4Parser { // Parses the ac4_dsi/ac4_dsi_v1 payload of an mp4 'dac4' box.
+public:
+ explicit AC4DSIParser(ABitReader &br); // NOTE: keeps a reference; 'br' must outlive the parser.
+ virtual ~AC4DSIParser() { }
+
+ bool parse();
+
+private:
+ bool parseSubstreamDSI(uint32_t presentationID, uint32_t substreamID);
+ bool parseSubstreamGroupDSI(uint32_t presentationID, uint32_t groupID);
+ bool parseLanguageTag(uint32_t presentationID, uint32_t substreamID);
+ bool parseBitrateDsi();
+
+ uint64_t mDSISize; // total DSI size in bits, captured at construction
+ ABitReader& mBitReader;
+};
+
+};
+
+#endif // AC4_PARSER_H_
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index fa739e8..40b2c97 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -2,6 +2,7 @@
name: "libmp4extractor_defaults",
srcs: [
+ "AC4Parser.cpp",
"ItemTable.cpp",
"MPEG4Extractor.cpp",
"SampleIterator.cpp",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 7b3b81d..8412812 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -26,6 +26,7 @@
#include <utils/Log.h>
+#include "AC4Parser.h"
#include "MPEG4Extractor.h"
#include "SampleTable.h"
#include "ItemTable.h"
@@ -125,6 +126,8 @@
bool mIsAVC;
bool mIsHEVC;
+ bool mIsAC4;
+
size_t mNALLengthSize;
bool mStarted;
@@ -324,6 +327,8 @@
case FOURCC('h', 'v', 'c', '1'):
case FOURCC('h', 'e', 'v', '1'):
return MEDIA_MIMETYPE_VIDEO_HEVC;
+ case FOURCC('a', 'c', '-', '4'):
+ return MEDIA_MIMETYPE_AUDIO_AC4;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -2436,6 +2441,12 @@
return parseAC3SampleEntry(data_offset);
}
+ case FOURCC('a', 'c', '-', '4'):
+ {
+ *offset += chunk_size;
+ return parseAC4SampleEntry(data_offset);
+ }
+
case FOURCC('f', 't', 'y', 'p'):
{
if (chunk_data_size < 8 || depth != 0) {
@@ -2507,6 +2518,84 @@
return OK;
}
+status_t MPEG4Extractor::parseAC4SampleEntry(off64_t offset) { // Parses the 'ac-4' AudioSampleEntry: channel count and sample rate, then hands off to the trailing dac4 box.
+ // skip 16 bytes:
+ // + 6-byte reserved,
+ // + 2-byte data reference index,
+ // + 8-byte reserved
+ offset += 16;
+ uint16_t channelCount;
+ if (!mDataSource->getUInt16(offset, &channelCount)) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read channel count");
+ return ERROR_MALFORMED;
+ }
+ // skip 8 bytes:
+ // + 2-byte channelCount,
+ // + 2-byte sample size,
+ // + 4-byte reserved
+ offset += 8;
+ uint16_t sampleRate;
+ if (!mDataSource->getUInt16(offset, &sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read sample rate");
+ return ERROR_MALFORMED;
+ }
+
+ // skip 4 bytes:
+ // + 2-byte sampleRate,
+ // + 2-byte reserved
+ offset += 4;
+
+ if (mLastTrack == NULL) { // no enclosing 'trak' seen: nowhere to attach the format
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+ return parseAC4SpecificBox(offset); // 'offset' now points at the dac4 box header
+}
+
+status_t MPEG4Extractor::parseAC4SpecificBox(off64_t offset) { // Reads the 'dac4' box at 'offset' and runs the AC-4 DSI parser over its payload.
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC4MinimumBoxSize = 4 + 4 + 3;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC4MinimumBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte size
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
+ return ERROR_MALFORMED;
+ }
+
+ // + 4-byte type
+ offset += 4;
+ // at least for AC4 DSI v1 this is big enough
+ const uint32_t kAC4SpecificBoxPayloadSize = 256;
+ uint8_t chunk[kAC4SpecificBoxPayloadSize];
+ ssize_t dsiSize = size - 8; // size of box - size and type fields; >= 3 because size >= kAC4MinimumBoxSize
+ if (dsiSize >= (ssize_t)kAC4SpecificBoxPayloadSize || // reject payloads that would overflow 'chunk'
+ mDataSource->readAt(offset, chunk, dsiSize) != dsiSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-4 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+ // + size-byte payload
+ offset += dsiSize;
+ ABitReader br(chunk, dsiSize);
+ AC4DSIParser parser(br);
+ if (!parser.parse()){ // parse for validation only; presentations are not stored here yet
+ ALOGE("MPEG4Extractor: error while parsing ac-4 specific block");
+ return ERROR_MALFORMED;
+ }
+
+ return OK;
+}
+
status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
// skip 16 bytes:
// + 6-byte reserved,
@@ -3857,6 +3946,7 @@
mCurrentSampleInfoOffsets(NULL),
mIsAVC(false),
mIsHEVC(false),
+ mIsAC4(false),
mNALLengthSize(0),
mStarted(false),
mGroup(NULL),
@@ -3890,6 +3980,7 @@
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+ mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
if (mIsAVC) {
uint32_t type;
@@ -4830,7 +4921,7 @@
}
}
- if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+ if ((!mIsAVC && !mIsHEVC && !mIsAC4) || mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read =
mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -4862,13 +4953,20 @@
++mCurrentSampleIndex;
}
- if (!mIsAVC && !mIsHEVC) {
+ if (!mIsAVC && !mIsHEVC && !mIsAC4) {
*out = mBuffer;
mBuffer = NULL;
return OK;
}
+ if (mIsAC4) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
// Each NAL unit is split up into its constituent fragments and
// each one of them returned in its own buffer.
@@ -4907,6 +5005,58 @@
*out = clone;
return OK;
+ } else if (mIsAC4) {
+ CHECK(mBuffer != NULL);
+ // Make sure there is enough space to write the sync header and the raw frame
+ if (mBuffer->range_length() < (7 + size)) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ uint8_t *dstData = (uint8_t *)mBuffer->data();
+ size_t dstOffset = 0;
+ // Add AC-4 sync header to MPEG4 encapsulated AC-4 raw frame
+ // AC40 sync word, meaning no CRC at the end of the frame
+ dstData[dstOffset++] = 0xAC;
+ dstData[dstOffset++] = 0x40;
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = 0xFF;
+ dstData[dstOffset++] = (uint8_t)((size >> 16) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 8) & 0xFF);
+ dstData[dstOffset++] = (uint8_t)((size >> 0) & 0xFF);
+
+ ssize_t numBytesRead = mDataSource->readAt(offset, dstData + dstOffset, size);
+ if (numBytesRead != (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ mBuffer->set_range(0, dstOffset + size);
+ mBuffer->meta_data().clear();
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
} else {
// Whole NAL units are returned but each fragment is prefixed by
// the start code (0x00 00 00 01).
@@ -5361,6 +5511,8 @@
return OK;
}
+
+ return OK;
}
MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 3ea0963..ed70aa7 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -141,6 +141,8 @@
status_t parseAC3SampleEntry(off64_t offset);
status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+ status_t parseAC4SampleEntry(off64_t offset);
+ status_t parseAC4SpecificBox(off64_t offset);
MPEG4Extractor(const MPEG4Extractor &);
MPEG4Extractor &operator=(const MPEG4Extractor &);
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 1fa8789..0fb5abc 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -9,6 +9,7 @@
srcs: [
"JAudioTrack.cpp",
+ "JavaVMHelper.cpp",
"MediaPlayer2AudioOutput.cpp",
"mediaplayer2.cpp",
],
@@ -49,6 +50,10 @@
"media_plugin_headers",
],
+ include_dirs: [
+ "frameworks/base/core/jni",
+ ],
+
static_libs: [
"libmedia_helper",
"libstagefright_nuplayer2",
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index ac0cc57..778ae1b 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -21,7 +21,7 @@
#include "mediaplayer2/JAudioTrack.h"
#include <android_media_AudioErrors.h>
-#include <android_runtime/AndroidRuntime.h>
+#include <mediaplayer2/JavaVMHelper.h>
namespace android {
@@ -39,7 +39,7 @@
const audio_attributes_t* pAttributes, // AudioAttributes
float maxRequiredSpeed) { // bufferSizeInBytes
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
@@ -116,19 +116,19 @@
}
JAudioTrack::~JAudioTrack() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
env->DeleteGlobalRef(mAudioTrackCls);
}
size_t JAudioTrack::frameCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
}
size_t JAudioTrack::channelCount() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
}
@@ -143,7 +143,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
mAudioTrackCls, "getPlaybackHeadPosition", "()I");
*position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
@@ -152,7 +152,7 @@
}
bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
@@ -189,7 +189,7 @@
status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
// TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
// Should we do the same thing?
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
@@ -224,7 +224,7 @@
}
const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlaybackParams = env->GetMethodID(
mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
@@ -266,7 +266,7 @@
return media::VolumeShaper::Status(BAD_VALUE);
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
"(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
@@ -282,7 +282,7 @@
}
status_t JAudioTrack::setAuxEffectSendLevel(float level) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
@@ -290,14 +290,14 @@
}
status_t JAudioTrack::attachAuxEffect(int effectId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
return javaToNativeStatus(result);
}
status_t JAudioTrack::setVolume(float left, float right) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// TODO: Java setStereoVolume is deprecated. Do we really need this method?
jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
@@ -305,14 +305,14 @@
}
status_t JAudioTrack::setVolume(float volume) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
return javaToNativeStatus(result);
}
status_t JAudioTrack::start() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
// TODO: Should we catch the Java IllegalStateException from play()?
env->CallVoidMethod(mAudioTrackObj, jPlay);
@@ -324,7 +324,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jbyteArray jAudioData = env->NewByteArray(size);
env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
@@ -353,7 +353,7 @@
}
void JAudioTrack::stop() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
env->CallVoidMethod(mAudioTrackObj, jStop);
// TODO: Should we catch IllegalStateException?
@@ -365,20 +365,20 @@
}
void JAudioTrack::flush() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
env->CallVoidMethod(mAudioTrackObj, jFlush);
}
void JAudioTrack::pause() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
env->CallVoidMethod(mAudioTrackObj, jPause);
// TODO: Should we catch IllegalStateException?
}
bool JAudioTrack::isPlaying() const {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
@@ -393,7 +393,7 @@
}
uint32_t JAudioTrack::getSampleRate() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
}
@@ -403,7 +403,7 @@
return BAD_VALUE;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetBufferSizeInFrames = env->GetMethodID(
mAudioTrackCls, "getBufferSizeInFrames", "()I");
int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
@@ -417,7 +417,7 @@
}
audio_format_t JAudioTrack::format() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
return audioFormatToNative(javaFormat);
@@ -454,7 +454,7 @@
}
audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
"()Landroid/media/AudioDeviceInfo;");
jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
@@ -469,14 +469,14 @@
}
audio_session_t JAudioTrack::getAudioSessionId() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
return (audio_session_t) sessionId;
}
status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
@@ -550,7 +550,7 @@
return NULL;
}
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
// Referenced "android_media_VolumeShaper.h".
jfloatArray xarray = nullptr;
@@ -595,7 +595,7 @@
jobject JAudioTrack::createVolumeShaperOperationObj(
const sp<media::VolumeShaper::Operation>& operation) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -647,7 +647,7 @@
}
jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
@@ -655,7 +655,7 @@
}
jobject JAudioTrack::createCallbackExecutor() {
- JNIEnv *env = AndroidRuntime::getJNIEnv();
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
"newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
new file mode 100644
index 0000000..90aaa7f
--- /dev/null
+++ b/media/libmediaplayer2/JavaVMHelper.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JavaVMHelper"
+
+#include "mediaplayer2/JavaVMHelper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <stdlib.h>
+
+namespace android {
+
+// static
+std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
+
+// static
+JNIEnv *JavaVMHelper::getJNIEnv() {
+ JNIEnv *env;
+ JavaVM *vm = sJavaVM.load();
+ CHECK(vm != NULL);
+
+ if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
+ return NULL;
+ }
+
+ return env;
+}
+
+// static
+void JavaVMHelper::setJavaVM(JavaVM *vm) {
+ sJavaVM.store(vm);
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
new file mode 100644
index 0000000..35091b7
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JAVA_VM_HELPER_H_
+
+#define JAVA_VM_HELPER_H_
+
+#include "jni.h"
+
+#include <atomic>
+
+namespace android {
+
+struct JavaVMHelper {
+ static JNIEnv *getJNIEnv();
+ static void setJavaVM(JavaVM *vm);
+
+private:
+ // Once a valid JavaVM has been set, it should never be reset or changed.
+ // However, as it may be accessed from multiple threads, access needs to be
+ // synchronized.
+ static std::atomic<JavaVM *> sJavaVM;
+};
+
+} // namespace android
+
+#endif // JAVA_VM_HELPER_H_
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index cf5e91e..ea778a4 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1577,6 +1577,7 @@
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
{ MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3},
+ { MEDIA_MIMETYPE_AUDIO_AC4, AUDIO_FORMAT_AC4},
{ MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC},
{ 0, AUDIO_FORMAT_INVALID }
};
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 1695c75..a32cf08 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -50,6 +50,7 @@
const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 25be89f..b165bcb 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -52,6 +52,7 @@
extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5cc5093..271d601 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -119,6 +119,7 @@
private:
struct StreamInfo {
unsigned mType;
+ unsigned mTypeExt;
unsigned mPID;
int32_t mCASystemId;
};
@@ -145,10 +146,12 @@
Stream(Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID);
unsigned type() const { return mStreamType; }
+ unsigned typeExt() const { return mStreamTypeExt; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
@@ -194,6 +197,7 @@
Program *mProgram;
unsigned mElementaryPID;
unsigned mStreamType;
+ unsigned mStreamTypeExt;
unsigned mPCR_PID;
int32_t mExpectedContinuityCounter;
@@ -447,7 +451,7 @@
if (descriptor_length > infoLength) {
break;
}
- if (descriptor_tag == 9 && descriptor_length >= 4) {
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
found = true;
caDescriptor->mSystemID = br->getBits(16);
caDescriptor->mPID = br->getBits(16) & 0x1fff;
@@ -513,37 +517,65 @@
// infoBytesRemaining is the number of bytes that make up the
// variable length section of ES_infos. It does not include the
// final CRC.
- size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
+ int32_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
while (infoBytesRemaining >= 5) {
-
- unsigned streamType = br->getBits(8);
- ALOGV(" stream_type = 0x%02x", streamType);
-
+ StreamInfo info;
+ info.mType = br->getBits(8);
+ ALOGV(" stream_type = 0x%02x", info.mType);
MY_LOGV(" reserved = %u", br->getBits(3));
- unsigned elementaryPID = br->getBits(13);
- ALOGV(" elementary_PID = 0x%04x", elementaryPID);
+ info.mPID = br->getBits(13);
+ ALOGV(" elementary_PID = 0x%04x", info.mPID);
MY_LOGV(" reserved = %u", br->getBits(4));
unsigned ES_info_length = br->getBits(12);
ALOGV(" ES_info_length = %u", ES_info_length);
+ infoBytesRemaining -= 5 + ES_info_length;
CADescriptor streamCA;
- bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_RESERVED_MAX;
+ bool hasStreamCA = false;
+ while (ES_info_length > 2 && infoBytesRemaining >= 0) {
+ unsigned descriptor_tag = br->getBits(8);
+ ALOGV(" tag = 0x%02x", descriptor_tag);
+
+ unsigned descriptor_length = br->getBits(8);
+ ALOGV(" len = %u", descriptor_length);
+
+ ES_info_length -= 2;
+ if (descriptor_length > ES_info_length) {
+ return ERROR_MALFORMED;
+ }
+ if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+ hasStreamCA = true;
+ streamCA.mSystemID = br->getBits(16);
+ streamCA.mPID = br->getBits(16) & 0x1fff;
+ ES_info_length -= 4;
+ streamCA.mPrivateData.assign(br->data(), br->data() + descriptor_length - 4);
+ } else if (info.mType == STREAMTYPE_PES_PRIVATE_DATA &&
+ descriptor_tag == DESCRIPTOR_DVB_EXTENSION && descriptor_length >= 1) {
+ unsigned descTagExt = br->getBits(8);
+ ALOGV(" tag_ext = 0x%02x", descTagExt);
+ if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
+ }
+ ES_info_length -= descriptor_length;
+ descriptor_length--;
+ br->skipBits(descriptor_length * 8);
+ } else {
+ ES_info_length -= descriptor_length;
+ br->skipBits(descriptor_length * 8);
+ }
+ }
if (hasStreamCA && !mParser->mCasManager->addStream(
- mProgramNumber, elementaryPID, streamCA)) {
+ mProgramNumber, info.mPID, streamCA)) {
return ERROR_MALFORMED;
}
- StreamInfo info;
- info.mType = streamType;
- info.mPID = elementaryPID;
info.mCASystemId = hasProgramCA ? programCA.mSystemID :
hasStreamCA ? streamCA.mSystemID : -1;
infos.push(info);
-
- infoBytesRemaining -= 5 + ES_info_length;
}
if (infoBytesRemaining != 0) {
@@ -602,7 +634,7 @@
if (index < 0) {
sp<Stream> stream = new Stream(
- this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+ this, info.mPID, info.mType, info.mTypeExt, PCR_PID, info.mCASystemId);
if (mSampleAesKeyItem != NULL) {
stream->signalNewSampleAesKey(mSampleAesKeyItem);
@@ -720,11 +752,13 @@
Program *program,
unsigned elementaryPID,
unsigned streamType,
+ unsigned streamTypeExt,
unsigned PCR_PID,
int32_t CA_system_ID)
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
+ mStreamTypeExt(streamTypeExt),
mPCR_PID(PCR_PID),
mExpectedContinuityCounter(-1),
mPayloadStarted(false),
@@ -781,6 +815,12 @@
mode = ElementaryStreamQueue::AC3;
break;
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
+ mode = ElementaryStreamQueue::AC4;
+ }
+ break;
+
case STREAMTYPE_METADATA:
mode = ElementaryStreamQueue::METADATA;
break;
@@ -989,6 +1029,8 @@
case STREAMTYPE_AAC_ENCRYPTED:
case STREAMTYPE_AC3_ENCRYPTED:
return true;
+ case STREAMTYPE_PES_PRIVATE_DATA:
+ return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
default:
return false;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 45ca06b..adb4fb2 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -142,6 +142,7 @@
STREAMTYPE_MPEG2_VIDEO = 0x02,
STREAMTYPE_MPEG1_AUDIO = 0x03,
STREAMTYPE_MPEG2_AUDIO = 0x04,
+ STREAMTYPE_PES_PRIVATE_DATA = 0x06,
STREAMTYPE_MPEG2_AUDIO_ADTS = 0x0f,
STREAMTYPE_MPEG4_VIDEO = 0x10,
STREAMTYPE_METADATA = 0x15,
@@ -160,6 +161,20 @@
STREAMTYPE_AC3_ENCRYPTED = 0xC1,
};
+ enum {
+ // From ISO/IEC 13818-1: 2007 (E), Table 2-29
+ DESCRIPTOR_CA = 0x09,
+
+ // DVB BlueBook A038 Table 12
+ DESCRIPTOR_DVB_EXTENSION = 0x7F,
+ };
+
+ // DVB BlueBook A038 Table 109
+ enum {
+ EXT_DESCRIPTOR_DVB_AC4 = 0x15,
+ EXT_DESCRIPTOR_DVB_RESERVED_MAX = 0x7F,
+ };
+
protected:
virtual ~ATSParser();
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 0fa9fcb..3deee7e 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -86,6 +86,21 @@
mCasSessionId = sessionId;
}
+static int32_t readVariableBits(ABitReader &bits, int32_t nbits) {
+ int32_t value = 0;
+ int32_t more_bits = 1;
+
+ while (more_bits) {
+ value += bits.getBits(nbits);
+ more_bits = bits.getBits(1);
+ if (!more_bits)
+ break;
+ value++;
+ value <<= nbits;
+ }
+ return value;
+}
+
// Parse AC3 header assuming the current ptr is start position of syncframe,
// update metadata only applicable, and return the payload size
static unsigned parseAC3SyncFrame(
@@ -199,6 +214,78 @@
return parseAC3SyncFrame(ptr, size, NULL) > 0;
}
+// Parse AC4 header assuming the current ptr is start position of syncframe
+// and update frameSize and metadata.
+static status_t parseAC4SyncFrame(
+ const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+ // ETSI TS 103 190-2 V1.1.1 (2015-09), Annex C
+ // The sync_word can be either 0xAC40 or 0xAC41.
+ static const int kSyncWordAC40 = 0xAC40;
+ static const int kSyncWordAC41 = 0xAC41;
+
+ size_t headerSize = 0;
+ ABitReader bits(ptr, size);
+ int32_t syncWord = bits.getBits(16);
+ if ((syncWord != kSyncWordAC40) && (syncWord != kSyncWordAC41)) {
+ ALOGE("Invalid syncword in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ headerSize += 2;
+
+ frameSize = bits.getBits(16);
+ headerSize += 2;
+ if (frameSize == 0xFFFF) {
+ frameSize = bits.getBits(24);
+ headerSize += 3;
+ }
+
+ if (frameSize == 0) {
+ ALOGE("Invalid frame size in AC4 header");
+ return ERROR_MALFORMED;
+ }
+ frameSize += headerSize;
+ // If the sync_word is 0xAC41, a crc_word is also transmitted.
+ if (syncWord == kSyncWordAC41) {
+ frameSize += 2; // crc_word
+ }
+ ALOGV("AC4 frameSize = %u", frameSize);
+
+ // ETSI TS 103 190-2 V1.1.1 6.2.1.1
+ uint32_t bitstreamVersion = bits.getBits(2);
+ if (bitstreamVersion == 3) {
+ bitstreamVersion += readVariableBits(bits, 2);
+ }
+
+ bits.skipBits(10); // Sequence Counter
+
+ uint32_t bWaitFrames = bits.getBits(1);
+ if (bWaitFrames) {
+ uint32_t waitFrames = bits.getBits(3);
+ if (waitFrames > 0) {
+ bits.skipBits(2); // br_code;
+ }
+ }
+
+ // ETSI TS 103 190 V1.1.1 Table 82
+ bool fsIndex = bits.getBits(1);
+ uint32_t samplingRate = fsIndex ? 48000 : 44100;
+
+ if (metaData != NULL) {
+ ALOGV("dequeueAccessUnitAC4 Setting mFormat");
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC4);
+ (*metaData)->setInt32(kKeyIsSyncFrame, 1);
+ // [FIXME] AC4 channel count is defined per presentation. Provide a default channel count
+ // as stereo for the entire stream.
+ (*metaData)->setInt32(kKeyChannelCount, 2);
+ (*metaData)->setInt32(kKeySampleRate, samplingRate);
+ }
+ return OK;
+}
+
+static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
+ return parseAC4SyncFrame(ptr, size, frameSize, NULL);
+}
+
static bool IsSeeminglyValidADTSHeader(
const uint8_t *ptr, size_t size, size_t *frameLength) {
if (size < 7) {
@@ -416,6 +503,42 @@
break;
}
+ case AC4:
+ {
+ uint8_t *ptr = (uint8_t *)data;
+ unsigned frameSize = 0;
+ ssize_t startOffset = -1;
+
+ // A valid AC4 stream should have minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (size < 7) {
+ return ERROR_MALFORMED;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (IsSeeminglyValidAC4Header(&ptr[i], size - i, frameSize) == OK) {
+ startOffset = i;
+ break;
+ }
+ }
+
+ if (startOffset < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ if (startOffset > 0) {
+ ALOGI("found something resembling an AC4 syncword at offset %zd",
+ startOffset);
+ }
+ if (frameSize != size - startOffset) {
+ ALOGV("AC4 frame size is %u bytes, while the buffer size is %zd bytes.",
+ frameSize, size - startOffset);
+ }
+
+ data = &ptr[startOffset];
+ size -= startOffset;
+ break;
+ }
+
case MPEG_AUDIO:
{
uint8_t *ptr = (uint8_t *)data;
@@ -649,6 +772,8 @@
return dequeueAccessUnitAAC();
case AC3:
return dequeueAccessUnitAC3();
+ case AC4:
+ return dequeueAccessUnitAC4();
case MPEG_VIDEO:
return dequeueAccessUnitMPEGVideo();
case MPEG4_VIDEO:
@@ -730,6 +855,69 @@
return accessUnit;
}
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAC4() {
+ unsigned syncStartPos = 0;
+ unsigned payloadSize = 0;
+ sp<MetaData> format = new MetaData;
+ ALOGV("dequeueAccessUnit_AC4[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
+ // A valid AC4 stream should have minimum of 7 bytes in its buffer.
+ // (Sync header 4 bytes + AC4 toc 3 bytes)
+ if (mBuffer->size() < 7) {
+ return NULL;
+ }
+
+ while (true) {
+ if (syncStartPos + 2 >= mBuffer->size()) {
+ return NULL;
+ }
+
+ status_t status = parseAC4SyncFrame(
+ mBuffer->data() + syncStartPos,
+ mBuffer->size() - syncStartPos,
+ payloadSize,
+ &format);
+ if (status == OK) {
+ break;
+ }
+
+ ALOGV("dequeueAccessUnit_AC4[%d]: syncStartPos %u payloadSize %u",
+ mAUIndex, syncStartPos, payloadSize);
+
+ ++syncStartPos;
+ }
+
+ if (mBuffer->size() < syncStartPos + payloadSize) {
+ ALOGV("Not enough buffer size for AC4");
+ return NULL;
+ }
+
+ if (mFormat == NULL) {
+ mFormat = format;
+ }
+
+ int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+ if (timeUs < 0ll) {
+ ALOGE("negative timeUs");
+ return NULL;
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+ accessUnit->meta()->setInt32("isSync", 1);
+
+ memmove(
+ mBuffer->data(),
+ mBuffer->data() + syncStartPos + payloadSize,
+ mBuffer->size() - syncStartPos - payloadSize);
+
+ mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+ return accessUnit;
+}
+
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitPCMAudio() {
if (mBuffer->size() < 4) {
return NULL;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index ffcb502..399214a 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -38,6 +38,7 @@
H264,
AAC,
AC3,
+ AC4,
MPEG_AUDIO,
MPEG_VIDEO,
MPEG4_VIDEO,
@@ -116,6 +117,7 @@
sp<ABuffer> dequeueAccessUnitH264();
sp<ABuffer> dequeueAccessUnitAAC();
sp<ABuffer> dequeueAccessUnitAC3();
+ sp<ABuffer> dequeueAccessUnitAC4();
sp<ABuffer> dequeueAccessUnitMPEGAudio();
sp<ABuffer> dequeueAccessUnitMPEGVideo();
sp<ABuffer> dequeueAccessUnitMPEG4Video();
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 778f9c2..8f37f7b 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -94,66 +94,66 @@
extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_BIT_RATE __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
-extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
-extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_COMPLEXITY __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_CSD;
-extern const char* AMEDIAFORMAT_KEY_CSD_0;
-extern const char* AMEDIAFORMAT_KEY_CSD_1;
-extern const char* AMEDIAFORMAT_KEY_CSD_2;
+extern const char* AMEDIAFORMAT_KEY_CSD __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_0 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_1 __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_CSD_2 __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_DURATION;
-extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
-extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_DURATION __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_FRAME_RATE __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_GRID_ROWS __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
-extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_HEIGHT __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
-extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
-extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
-extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
-extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
-extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_IS_ADTS __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_LANGUAGE __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_LATENCY __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_LEVEL __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
-extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
-extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
-extern const char* AMEDIAFORMAT_KEY_MIME;
-extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA;
+extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MIME __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_PRIORITY __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_PROFILE __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
-extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER __INTRODUCED_IN(21);
extern const char* AMEDIAFORMAT_KEY_ROTATION __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_SEI;
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_SEI __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_STRIDE;
-extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID;
+extern const char* AMEDIAFORMAT_KEY_STRIDE __INTRODUCED_IN(21);
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_TIME_US;
+extern const char* AMEDIAFORMAT_KEY_TIME_US __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_TRACK_ID __INTRODUCED_IN(28);
-extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX __INTRODUCED_IN(28);
+extern const char* AMEDIAFORMAT_KEY_WIDTH __INTRODUCED_IN(21);
bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
bool AMediaFormat_getRect(AMediaFormat*, const char *name,
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index fb56694..d828d6a 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -32,55 +32,66 @@
AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
- AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_PROFILE; # var introduced=21
AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
- AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_BIT_RATE; # var introduced=21
AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
- AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
- AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
- AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var introduced=21
+ AMEDIAFORMAT_KEY_CHANNEL_MASK; # var introduced=21
+ AMEDIAFORMAT_KEY_COLOR_FORMAT; # var introduced=21
AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_CSD; # var introduced=28
+ AMEDIAFORMAT_KEY_CSD_0; # var introduced=28
+ AMEDIAFORMAT_KEY_CSD_1; # var introduced=28
+ AMEDIAFORMAT_KEY_CSD_2; # var introduced=28
AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
- AMEDIAFORMAT_KEY_DURATION; # var
- AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
- AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_DISPLAY_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_DURATION; # var introduced=21
+ AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var introduced=21
+ AMEDIAFORMAT_KEY_FRAME_RATE; # var introduced=21
AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
- AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
- AMEDIAFORMAT_KEY_IS_ADTS; # var
- AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
- AMEDIAFORMAT_KEY_IS_DEFAULT; # var
- AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
- AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
- AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_IS_ADTS; # var introduced=21
+ AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var introduced=21
+ AMEDIAFORMAT_KEY_IS_DEFAULT; # var introduced=21
+ AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var introduced=21
+ AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
+ AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
- AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
- AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
- AMEDIAFORMAT_KEY_MAX_WIDTH; # var
- AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
+ AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+ AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
+ AMEDIAFORMAT_KEY_MIME; # var introduced=21
+ AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
- AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
- AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
+ AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
- AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
+ AMEDIAFORMAT_KEY_SEI; # var introduced=28
AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
- AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID; # var introduced=28
AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_TIME_US; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_INDEX; # var introduced=28
AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
- AMEDIAFORMAT_KEY_WIDTH; # var
+ AMEDIAFORMAT_KEY_WIDTH; # var introduced=21
AMediaCodecActionCode_isRecoverable; # introduced=28
AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0276cad..0bb492a 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -62,6 +62,7 @@
#include <media/LinearMap.h>
#include <media/VolumeShaper.h>
+#include <audio_utils/clock.h>
#include <audio_utils/SimpleLog.h>
#include <audio_utils/TimestampVerifier.h>
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 42a5a90..0caa0af 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -110,9 +110,7 @@
status_t status = NO_ERROR;
audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- (patch->num_sinks == 0 && patch->num_sources != 2) ||
- patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch) || (patch->num_sinks == 0 && patch->num_sources != 2)) {
return BAD_VALUE;
}
// limit number of sources to 1 for now or 2 sources for special cross hw module case.
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 70af5c6..f68bfee 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -857,7 +857,8 @@
if (mType == RECORD
|| mType == MIXER
|| mType == DUPLICATING
- || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) {
+ || mType == DIRECT
+ || mType == OFFLOAD) {
dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
}
@@ -2482,6 +2483,11 @@
Mutex::Autolock _l(mLock);
// reject out of sequence requests
if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
+ // Register discontinuity when HW drain is completed because that can cause
+ // the timestamp frame position to reset to 0 for direct and offload threads.
+ // (Out of sequence requests are ignored, since the discontinuity would be handled
+ // elsewhere, e.g. in flush).
+ mTimestampVerifier.discontinuity();
mDrainSequence &= ~1;
mWaitWorkCV.signal();
}
@@ -3190,6 +3196,15 @@
checkSilentMode_l();
+ // DIRECT and OFFLOAD threads should reset frame count to zero on stop/flush
+ // TODO: add confirmation checks:
+ // 1) DIRECT threads and linear PCM format really resets to 0?
+ // 2) Is frame count really valid if not linear pcm?
+ // 3) Are all 64 bits of position returned, not just lowest 32 bits?
+ if (mType == OFFLOAD || mType == DIRECT) {
+ mTimestampVerifier.setDiscontinuityMode(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
+ }
+
while (!exitPending())
{
// Log merge requests are performed during AudioFlinger binder transactions, but
@@ -3216,7 +3231,8 @@
// Collect timestamp statistics for the Playback Thread types that support it.
if (mType == MIXER
|| mType == DUPLICATING
- || (mType == DIRECT && audio_is_linear_pcm(mHALFormat))) { // no indentation
+ || mType == DIRECT
+ || mType == OFFLOAD) { // no indentation
// Gather the framesReleased counters for all active tracks,
// and associate with the sink frames written out. We need
// this to convert the sink timestamp to the track timestamp.
@@ -5622,6 +5638,7 @@
mOutput->flush();
mHwPaused = false;
mFlushPending = false;
+ mTimestampVerifier.discontinuity(); // DIRECT and OFFLOADED flush resets frame count.
}
int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -5956,6 +5973,14 @@
track->presentationComplete(framesWritten, audioHALFrames);
track->reset();
tracksToRemove->add(track);
+ // DIRECT and OFFLOADED stop resets frame counts.
+ if (!mUseAsyncWrite) {
+ // If we don't get explicit drain notification we must
+ // register discontinuity regardless of whether this is
+ // the previous (!last) or the upcoming (last) track
+ // to avoid skipping the discontinuity.
+ mTimestampVerifier.discontinuity();
+ }
}
} else {
// No buffers for this track. Give it a few chances to
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 064e291..0c833f1 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1224,6 +1224,23 @@
virtual bool hasFastMixer() const { return false; }
virtual int64_t computeWaitTimeNs_l() const override;
+
+ status_t threadloop_getHalTimestamp_l(ExtendedTimestamp *timestamp) const override {
+ // For DIRECT and OFFLOAD threads, query the output sink directly.
+ if (mOutput != nullptr) {
+ uint64_t uposition64;
+ struct timespec time;
+ if (mOutput->getPresentationPosition(
+ &uposition64, &time) == OK) {
+ timestamp->mPosition[ExtendedTimestamp::LOCATION_KERNEL]
+ = (int64_t)uposition64;
+ timestamp->mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
+ = audio_utils_ns_from_timespec(&time);
+ return NO_ERROR;
+ }
+ }
+ return INVALID_OPERATION;
+ }
};
class OffloadThread : public DirectOutputThread {
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4812b1f..fe49483 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -324,11 +324,6 @@
// function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0;
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0;
- virtual status_t stopTone() = 0;
-
// set down link audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 007eea0..941119b 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -408,8 +408,7 @@
case STRATEGY_SONIFICATION:
- // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
- // handleIncallSonification().
+ // If incall, just select the STRATEGY_PHONE device
if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 7154cb2..10b9ebe 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -551,14 +551,8 @@
return;
}
/// Opens: can these line be executed after the switch of volume curves???
- // if leaving call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, false, true);
- }
-
// force reevaluating accessibility routing when call stops
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -637,14 +631,8 @@
}
}
- // if entering in call state, handle special case of active streams
- // pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- handleIncallSonification((audio_stream_type_t)stream, true, true);
- }
-
// force reevaluating accessibility routing when call starts
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -813,39 +801,53 @@
stream_type_to_audio_attributes(*stream, &attributes);
}
- // TODO: check for existing client for this port ID
- if (*portId == AUDIO_PORT_HANDLE_NONE) {
- *portId = AudioPort::getNextUniqueId();
- }
-
- sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
- ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
- if (!audio_has_proportional_frames(config->format)) {
- return BAD_VALUE;
- }
- *stream = streamTypefromAttributesInt(&attributes);
- *output = desc->mIoHandle;
- ALOGV("getOutputForAttr() returns output %d", *output);
- return NO_ERROR;
- }
- if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
- ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
- return BAD_VALUE;
- }
-
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
session, *selectedDeviceId);
- *stream = streamTypefromAttributesInt(&attributes);
+ // TODO: check for existing client for this port ID
+ if (*portId == AUDIO_PORT_HANDLE_NONE) {
+ *portId = AudioPort::getNextUniqueId();
+ }
- // Explicit routing?
+ // First check for explicit routing (eg. setPreferredDevice)
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
+ } else {
+ // If no explicit route, is there a matching dynamic policy that applies?
+ sp<SwAudioOutputDescriptor> desc;
+ if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
+ ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+ if (!audio_has_proportional_frames(config->format)) {
+ return BAD_VALUE;
+ }
+ *stream = streamTypefromAttributesInt(&attributes);
+ *output = desc->mIoHandle;
+ ALOGV("getOutputForAttr() returns output %d", *output);
+ return NO_ERROR;
+ }
+
+ // Virtual sources must always be dynamically or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
}
+
+ // Virtual sources must always be dynamically or explicitly routed
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
+
+ *stream = streamTypefromAttributesInt(&attributes);
+
+ // TODO: Should this happen only if an explicit route is active?
+ // the previous code structure meant that this would always happen which
+ // would appear to result in adding a null deviceDesc when not using an
+ // explicit route. Is that the intended and necessary behavior?
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
@@ -1279,11 +1281,6 @@
const uint32_t muteWaitMs =
setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, true, false);
- }
-
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
@@ -1378,11 +1375,6 @@
// always handle stream stop, check which stream type is stopping
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
- // handle special case for sonification while in call
- if (isInCall()) {
- handleIncallSonification(stream, false, false);
- }
-
if (outputDesc->mRefCount[stream] > 0) {
// decrement usage count of this stream on the output
outputDesc->changeRefCount(stream, -1);
@@ -2273,11 +2265,10 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream) || isInCall())) {
continue;
}
- if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
- (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+ if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
@@ -2834,8 +2825,7 @@
}
ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
- if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
- patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ if (!audio_patch_is_valid(patch)) {
return BAD_VALUE;
}
// only one source per audio patch supported for now
@@ -5423,8 +5413,8 @@
return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
}
- // in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ // in-call: always cap volume by voice volume + some low headroom
+ if ((stream != AUDIO_STREAM_VOICE_CALL) &&
(isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
@@ -5436,9 +5426,9 @@
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
const float maxVoiceVolDb =
- computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
@@ -5551,7 +5541,14 @@
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
if (stream == AUDIO_STREAM_VOICE_CALL) {
- voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
+ // FIXME: issue 111194621: this should not happen
+ int maxIndex = mVolumeCurves->getVolumeIndexMax(stream);
+ if (index > maxIndex) {
+ ALOGW("%s limiting voice call index %d to max index %d",
+ __FUNCTION__, index, maxIndex);
+ index = maxIndex;
+ }
+ voiceVolume = (float)index/(float)maxIndex;
} else {
voiceVolume = 1.0;
}
@@ -5635,55 +5632,6 @@
}
}
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
- bool starting, bool stateChange)
-{
- if(!hasPrimaryOutput()) {
- return;
- }
-
- // if the stream pertains to sonification strategy and we are in call we must
- // mute the stream if it is low visibility. If it is high visibility, we must play a tone
- // in the device used for phone strategy and play the tone if the selected device does not
- // interfere with the device used for phone strategy
- // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
- // many times as there are active tracks on the output
- const routing_strategy stream_strategy = getStrategy(stream);
- if ((stream_strategy == STRATEGY_SONIFICATION) ||
- ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
- sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
- ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
- stream, starting, outputDesc->mDevice, stateChange);
- if (outputDesc->mRefCount[stream]) {
- int muteCount = 1;
- if (stateChange) {
- muteCount = outputDesc->mRefCount[stream];
- }
- if (audio_is_low_visibility(stream)) {
- ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- } else {
- ALOGV("handleIncallSonification() high visibility");
- if (outputDesc->device() &
- getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
- ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
- for (int i = 0; i < muteCount; i++) {
- setStreamMute(stream, starting, mPrimaryOutput);
- }
- }
- if (starting) {
- mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
- AUDIO_STREAM_VOICE_CALL);
- } else {
- mpClientInterface->stopTone();
- }
- }
- }
- }
-}
-
audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
{
// flags to stream type mapping
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 893b963..136e522 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -377,10 +377,6 @@
int delayMs = 0,
audio_devices_t device = (audio_devices_t)0);
- // handle special cases for sonification strategy while in call: mute streams or replace by
- // a special tone in the device used for communication
- void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
audio_mode_t getPhoneState();
// true if device is in a telephony or VoIP call
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index b064f8c..21fffec 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -155,17 +155,6 @@
return result;
}
-status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- return mAudioPolicyService->startTone(tone, stream);
-}
-
-status_t AudioPolicyService::AudioPolicyClient::stopTone()
-{
- return mAudioPolicyService->stopTone();
-}
-
status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
{
return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 1379223..ca3b6b6 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -67,8 +67,6 @@
{
Mutex::Autolock _l(mLock);
- // start tone playback thread
- mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
@@ -90,7 +88,6 @@
AudioPolicyService::~AudioPolicyService()
{
- mTonePlaybackThread->exit();
mAudioCommandThread->exit();
mOutputCommandThread->exit();
@@ -115,13 +112,17 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
- uid);
- ALOGV("registerClient() client %p, uid %d", client.get(), uid);
+ uid,
+ pid);
+ ALOGV("registerClient() client %p, uid %d pid %d", client.get(), uid, pid);
- mNotificationClients.add(uid, notificationClient);
+ mNotificationClients.add(token, notificationClient);
sp<IBinder> binder = IInterface::asBinder(client);
binder->linkToDeath(notificationClient);
@@ -133,22 +134,33 @@
Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
- if (mNotificationClients.indexOfKey(uid) < 0) {
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
return;
}
- mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+ mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
}
// removeNotificationClient() is called when the client process dies.
-void AudioPolicyService::removeNotificationClient(uid_t uid)
+void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
{
{
Mutex::Autolock _l(mNotificationClientsLock);
- mNotificationClients.removeItem(uid);
+ int64_t token = ((int64_t)uid<<32) | pid;
+ mNotificationClients.removeItem(token);
}
{
Mutex::Autolock _l(mLock);
- if (mAudioPolicyManager) {
+ bool hasSameUid = false;
+ for (size_t i = 0; i < mNotificationClients.size(); i++) {
+ if (mNotificationClients.valueAt(i)->uid() == uid) {
+ hasSameUid = true;
+ break;
+ }
+ }
+ if (mAudioPolicyManager && !hasSameUid) {
// called from binder death notification: no need to clear caller identity
mAudioPolicyManager->releaseResourcesForUid(uid);
}
@@ -236,8 +248,9 @@
AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid)
- : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+ uid_t uid,
+ pid_t pid)
+ : mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
mAudioPortCallbacksEnabled(false)
{
}
@@ -251,7 +264,7 @@
sp<NotificationClient> keep(this);
sp<AudioPolicyService> service = mService.promote();
if (service != 0) {
- service->removeNotificationClient(mUid);
+ service->removeNotificationClient(mUid, mPid);
}
}
@@ -322,8 +335,6 @@
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
- snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get());
- result.append(buffer);
write(fd, result.string(), result.size());
return NO_ERROR;
@@ -359,9 +370,6 @@
if (mAudioCommandThread != 0) {
mAudioCommandThread->dump(fd);
}
- if (mTonePlaybackThread != 0) {
- mTonePlaybackThread->dump(fd);
- }
if (mAudioPolicyManager) {
mAudioPolicyManager->dump(fd);
@@ -632,7 +640,6 @@
const wp<AudioPolicyService>& service)
: Thread(false), mName(name), mService(service)
{
- mpToneGenerator = NULL;
}
@@ -642,7 +649,6 @@
release_wake_lock(mName.string());
}
mAudioCommands.clear();
- delete mpToneGenerator;
}
void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -667,26 +673,6 @@
mLastCommand = command;
switch (command->mCommand) {
- case START_TONE: {
- mLock.unlock();
- ToneData *data = (ToneData *)command->mParam.get();
- ALOGV("AudioCommandThread() processing start tone %d on stream %d",
- data->mType, data->mStream);
- delete mpToneGenerator;
- mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
- mpToneGenerator->startTone(data->mType);
- mLock.lock();
- }break;
- case STOP_TONE: {
- mLock.unlock();
- ALOGV("AudioCommandThread() processing stop tone");
- if (mpToneGenerator != NULL) {
- mpToneGenerator->stopTone();
- delete mpToneGenerator;
- mpToneGenerator = NULL;
- }
- mLock.lock();
- }break;
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam.get();
ALOGV("AudioCommandThread() processing set volume stream %d, \
@@ -893,27 +879,6 @@
return NO_ERROR;
}
-void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream)
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = START_TONE;
- sp<ToneData> data = new ToneData();
- data->mType = type;
- data->mStream = stream;
- command->mParam = data;
- ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream);
- sendCommand(command);
-}
-
-void AudioPolicyService::AudioCommandThread::stopToneCommand()
-{
- sp<AudioCommand> command = new AudioCommand();
- command->mCommand = STOP_TONE;
- ALOGV("AudioCommandThread() adding tone stop");
- sendCommand(command);
-}
-
status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
float volume,
audio_io_handle_t output,
@@ -1250,8 +1215,6 @@
} break;
- case START_TONE:
- case STOP_TONE:
default:
break;
}
@@ -1324,27 +1287,6 @@
output, delayMs);
}
-int AudioPolicyService::startTone(audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) {
- ALOGE("startTone: illegal tone requested (%d)", tone);
- }
- if (stream != AUDIO_STREAM_VOICE_CALL) {
- ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
- tone);
- }
- mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
- AUDIO_STREAM_VOICE_CALL);
- return 0;
-}
-
-int AudioPolicyService::stopTone()
-{
- mTonePlaybackThread->stopToneCommand();
- return 0;
-}
-
int AudioPolicyService::setVoiceVolume(float volume, int delayMs)
{
return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
@@ -1400,9 +1342,6 @@
int aps_set_stream_volume(void *service, audio_stream_type_t stream,
float volume, audio_io_handle_t output,
int delay_ms);
-int aps_start_tone(void *service, audio_policy_tone_t tone,
- audio_stream_type_t stream);
-int aps_stop_tone(void *service);
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 09375f1..a1366bb 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -157,8 +157,6 @@
float volume,
audio_io_handle_t output,
int delayMs = 0);
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
virtual bool isOffloadSupported(const audio_offload_info_t &config);
@@ -222,7 +220,7 @@
virtual status_t clientSetAudioPortConfig(const struct audio_port_config *config,
int delayMs);
- void removeNotificationClient(uid_t uid);
+ void removeNotificationClient(uid_t uid, pid_t pid);
void onAudioPortListUpdate();
void doOnAudioPortListUpdate();
void onAudioPatchListUpdate();
@@ -304,10 +302,7 @@
std::unordered_map<uid_t, bool> mCachedUids;
};
- // Thread used for tone playback and to send audio config commands to audio flinger
- // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
- // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
- // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+ // Thread used to send audio config commands to audio flinger
// For audio config commands, it is necessary because audio flinger requires that the calling
// process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {
@@ -316,8 +311,6 @@
// commands for tone AudioCommand
enum {
- START_TONE,
- STOP_TONE,
SET_VOLUME,
SET_PARAMETERS,
SET_VOICE_VOLUME,
@@ -342,9 +335,6 @@
virtual bool threadLoop();
void exit();
- void startToneCommand(ToneGenerator::tone_type type,
- audio_stream_type_t stream);
- void stopToneCommand();
status_t volumeCommand(audio_stream_type_t stream, float volume,
audio_io_handle_t output, int delayMs = 0);
status_t parametersCommand(audio_io_handle_t ioHandle,
@@ -387,7 +377,7 @@
void dump(char* buffer, size_t size);
- int mCommand; // START_TONE, STOP_TONE ...
+ int mCommand; // SET_VOLUME, SET_PARAMETERS...
nsecs_t mTime; // time stamp
Mutex mLock; // mutex associated to mCond
Condition mCond; // condition for status return
@@ -403,12 +393,6 @@
AudioCommandData() {}
};
- class ToneData : public AudioCommandData {
- public:
- ToneGenerator::tone_type mType; // tone type (START_TONE only)
- audio_stream_type_t mStream; // stream type (START_TONE only)
- };
-
class VolumeData : public AudioCommandData {
public:
audio_stream_type_t mStream;
@@ -475,7 +459,6 @@
Mutex mLock;
Condition mWaitWorkCV;
Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
- ToneGenerator *mpToneGenerator; // the tone generator
sp<AudioCommand> mLastCommand; // last processed command (used by dump)
String8 mName; // string used by wake lock fo delayed commands
wp<AudioPolicyService> mService;
@@ -550,11 +533,6 @@
// function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
- // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
- // over a telephony device during a phone call.
- virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
- virtual status_t stopTone();
-
// set down link audio volume.
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
@@ -594,7 +572,7 @@
public:
NotificationClient(const sp<AudioPolicyService>& service,
const sp<IAudioPolicyServiceClient>& client,
- uid_t uid);
+ uid_t uid, pid_t pid);
virtual ~NotificationClient();
void onAudioPortListUpdate();
@@ -607,6 +585,10 @@
audio_patch_handle_t patchHandle);
void setAudioPortCallbacksEnabled(bool enabled);
+ uid_t uid() {
+ return mUid;
+ }
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -616,6 +598,7 @@
const wp<AudioPolicyService> mService;
const uid_t mUid;
+ const pid_t mPid;
const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
bool mAudioPortCallbacksEnabled;
};
@@ -673,14 +656,13 @@
// mLock protects AudioPolicyManager methods that can call into audio flinger
// and possibly back in to audio policy service and acquire mEffectsLock.
sp<AudioCommandThread> mAudioCommandThread; // audio commands thread
- sp<AudioCommandThread> mTonePlaybackThread; // tone playback thread
sp<AudioCommandThread> mOutputCommandThread; // process stop and release output
struct audio_policy_device *mpAudioPolicyDev;
struct audio_policy *mpAudioPolicy;
AudioPolicyInterface *mAudioPolicyManager;
AudioPolicyClient *mAudioPolicyClient;
- DefaultKeyedVector< uid_t, sp<NotificationClient> > mNotificationClients;
+ DefaultKeyedVector< int64_t, sp<NotificationClient> > mNotificationClients;
Mutex mNotificationClientsLock; // protects mNotificationClients
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index cfa9ab1..b739b88 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -29,3 +29,26 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
include $(BUILD_NATIVE_TEST)
+
+# system/audio.h utilities test
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ liblog \
+ libmedia_helper \
+ libutils
+
+LOCAL_SRC_FILES := \
+ systemaudio_tests.cpp \
+
+LOCAL_MODULE := systemaudio_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index eb8222c..2ff7675 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -60,9 +60,6 @@
int /*delayMs*/) override { }
String8 getParameters(audio_io_handle_t /*ioHandle*/,
const String8& /*keys*/) override { return String8(); }
- status_t startTone(audio_policy_tone_t /*tone*/,
- audio_stream_type_t /*stream*/) override { return NO_INIT; }
- status_t stopTone() override { return NO_INIT; }
status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
status_t moveEffects(audio_session_t /*session*/,
audio_io_handle_t /*srcOutput*/,
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
new file mode 100644
index 0000000..abaae52
--- /dev/null
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define LOG_TAG "SysAudio_Test"
+#include <log/log.h>
+#include <media/PatchBuilder.h>
+#include <system/audio.h>
+
+using namespace android;
+
+TEST(SystemAudioTest, PatchInvalid) {
+ audio_patch patch{};
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 1;
+ patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+ patch.num_sources = 0;
+ patch.num_sinks = 1;
+ ASSERT_FALSE(audio_patch_is_valid(&patch));
+}
+
+TEST(SystemAudioTest, PatchValid) {
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ // It's OK not to have sinks.
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patch_is_valid(
+ (PatchBuilder{}).addSource(src).addSource(src).
+ addSink(sink).addSink(sink).patch()));
+}
+
+TEST(SystemAudioTest, PatchHwAvSync) {
+ audio_port_config device_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+ device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
+
+ audio_port_config device_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+ device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
+
+ audio_port_config mix_sink_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+ mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
+
+ audio_port_config mix_src_cfg = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+ mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
+}
+
+TEST(SystemAudioTest, PatchEqual) {
+ const audio_patch patch1{}, patch2{};
+ // Invalid patches are not equal.
+ ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
+ const audio_port_config src = {
+ .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
+ const audio_port_config sink = {
+ .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink).patch()));
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
+ audio_port_config sink_hw_av_sync = sink;
+ sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ ASSERT_FALSE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+ ASSERT_TRUE(audio_patches_are_equal(
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
+ (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
+}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0ce4318..3be6399 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -336,6 +336,7 @@
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
bool /*preexisting*/) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -458,6 +459,7 @@
}
status_t CameraProviderManager::removeProvider(const std::string& provider) {
+ std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
std::unique_lock<std::mutex> lock(mInterfaceMutex);
std::vector<String8> removedDeviceIds;
status_t res = NAME_NOT_FOUND;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b8b8b8c..c523c2d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -246,6 +246,9 @@
wp<StatusListener> mListener;
ServiceInteractionProxy* mServiceProxy;
+ // mProviderLifecycleLock is locked during onRegistration and removeProvider
+ mutable std::mutex mProviderLifecycleLock;
+
static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
struct ProviderInfo :