Merge \\"AudioFlinger: fix capture stop sequence\\" into nyc-mr1-dev am: a5bcf56619
am: d7fdb3b5c3
Change-Id: Icd9355083c77ae2e0b7fb2ed12d675105483d3dd
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
index 7c1f5c8..9ab08db 100644
--- a/drm/libmediadrm/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -61,7 +61,7 @@
}
struct DrmSessionClient : public DrmSessionClientInterface {
- DrmSessionClient(Drm* drm) : mDrm(drm) {}
+ explicit DrmSessionClient(Drm* drm) : mDrm(drm) {}
virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
sp<Drm> drm = mDrm.promote();
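
The change above marks the single-argument DrmSessionClient constructor explicit, and the same treatment is applied to the Bp* Binder proxy constructors further down. The point is to keep the compiler from silently using such constructors for implicit conversions. A minimal standalone sketch of the effect, using a hypothetical Widget type that is not part of this tree:

    // Sketch only: hypothetical type, not from this patch.
    #include <cstdio>

    struct Widget {
        explicit Widget(int size) : mSize(size) {}
        int mSize;
    };

    static void consume(const Widget &w) { std::printf("size=%d\n", w.mSize); }

    int main() {
        consume(Widget(42));   // fine: the construction is spelled out
        // consume(7);         // without 'explicit' this would compile via an
        //                     // implicit int -> Widget conversion; now it cannot
        return 0;
    }
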
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
new file mode 100644
index 0000000..5f2a32d
--- /dev/null
+++ b/include/media/MediaDefs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEFS_H_
+
+#define MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
+extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
+extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
+extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
+extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
+
+extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+extern const char *MEDIA_MIMETYPE_TEXT_VTT;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+
+// These values are exported to the Java API and need to be kept in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so they are defined here.
+enum AudioEncoding {
+ kAudioEncodingPcm16bit = 2,
+ kAudioEncodingPcm8bit = 3,
+ kAudioEncodingPcmFloat = 4,
+};
+
+} // namespace android
+
+#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 29a9734..b313246 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -94,10 +94,6 @@
DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
};
- static bool isFlexibleColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
- uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
-
// Returns 0 if configuration is not supported. NOTE: this is treated by
// some OMX components as auto level, and by others as invalid level.
static int /* OMX_VIDEO_AVCLEVELTYPE */ getAVCLevelFor(
@@ -342,9 +338,6 @@
ssize_t *index = NULL);
status_t setComponentRole(bool isEncoder, const char *mime);
- static const char *getComponentRole(bool isEncoder, const char *mime);
- static status_t setComponentRole(
- const sp<IOMX> &omx, IOMX::node_id node, const char *role);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
@@ -552,11 +545,6 @@
OMX_ERRORTYPE error = OMX_ErrorUndefined,
status_t internalError = UNKNOWN_ERROR);
- static bool describeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
- static bool describeColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
- DescribeColorFormat2Params &describeParams);
-
status_t requestIDRFrame();
status_t setParameters(const sp<AMessage> &params);
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 5f2a32d..359fb69 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,69 +14,18 @@
* limitations under the License.
*/
-#ifndef MEDIA_DEFS_H_
-#define MEDIA_DEFS_H_
+#ifndef STAGEFRIGHT_MEDIA_DEFS_H_
+#define STAGEFRIGHT_MEDIA_DEFS_H_
-namespace android {
+/*
+ * Please, DO NOT USE!
+ *
+ * This file is here only for legacy reasons. Instead, include the
+ * header below directly.
+ *
+ */
-extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+#include <media/MediaDefs.h>
-extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
-extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
-extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
-extern const char *MEDIA_MIMETYPE_VIDEO_H263;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
-extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
-extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
-
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
-extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
-extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
-extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
-extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
-extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
-extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
-
-extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
-extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
-extern const char *MEDIA_MIMETYPE_TEXT_VTT;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
-extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
-
-// These are values exported to JAVA API that need to be in sync with
-// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
-// they are not defined in frameworks/av, so defining them here.
-enum AudioEncoding {
- kAudioEncodingPcm16bit = 2,
- kAudioEncodingPcm8bit = 3,
- kAudioEncodingPcmFloat = 4,
-};
-
-} // namespace android
-
-#endif // MEDIA_DEFS_H_
+#endif // STAGEFRIGHT_MEDIA_DEFS_H_
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 1b92123..20c390b 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -49,6 +49,7 @@
IResourceManagerService.cpp \
IStreamSource.cpp \
MediaCodecInfo.cpp \
+ MediaDefs.cpp \
MediaUtils.cpp \
Metadata.cpp \
mediarecorder.cpp \
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index ee7f757..f352f73 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -302,6 +302,10 @@
int32_t offset = data.readInt32();
int32_t numSubSamples = data.readInt32();
+ if (numSubSamples < 0 || numSubSamples > 0xffff) {
+ reply->writeInt32(BAD_VALUE);
+ return OK;
+ }
CryptoPlugin::SubSample *subSamples =
new CryptoPlugin::SubSample[numSubSamples];
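
numSubSamples is read straight from the Parcel sent by a remote caller, and it sizes the new CryptoPlugin::SubSample[] allocation that follows, so the added range check rejects negative or absurdly large counts before any memory is reserved. A minimal sketch of the same pattern, with hypothetical names rather than the Binder API:

    // Bound any count read from an untrusted source before it drives an
    // allocation or a loop. Names here are illustrative only.
    #include <cstdint>
    #include <vector>

    static const int32_t kMaxSubSamples = 0xffff;

    static bool resizeFromWire(int32_t wireCount, std::vector<uint32_t> *out) {
        if (wireCount < 0 || wireCount > kMaxSubSamples) {
            return false;                 // reject instead of allocating
        }
        out->resize(static_cast<size_t>(wireCount));
        return true;
    }

    int main() {
        std::vector<uint32_t> sizes;
        return resizeFromWire(4, &sizes) ? 0 : 1;
    }
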
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
index dcf2b27..2d62419 100644
--- a/media/libmedia/IMediaCodecService.cpp
+++ b/media/libmedia/IMediaCodecService.cpp
@@ -33,7 +33,7 @@
class BpMediaCodecService : public BpInterface<IMediaCodecService>
{
public:
- BpMediaCodecService(const sp<IBinder>& impl)
+ explicit BpMediaCodecService(const sp<IBinder>& impl)
: BpInterface<IMediaCodecService>(impl)
{
}
diff --git a/media/libmedia/IMediaDrmService.cpp b/media/libmedia/IMediaDrmService.cpp
index 9b6ecfd..84812dc 100644
--- a/media/libmedia/IMediaDrmService.cpp
+++ b/media/libmedia/IMediaDrmService.cpp
@@ -37,7 +37,7 @@
class BpMediaDrmService: public BpInterface<IMediaDrmService>
{
public:
- BpMediaDrmService(const sp<IBinder>& impl)
+ explicit BpMediaDrmService(const sp<IBinder>& impl)
: BpInterface<IMediaDrmService>(impl)
{
}
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index e8ad75b..eb88efd 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -43,7 +43,7 @@
class BpMediaExtractor : public BpInterface<IMediaExtractor> {
public:
- BpMediaExtractor(const sp<IBinder>& impl)
+ explicit BpMediaExtractor(const sp<IBinder>& impl)
: BpInterface<IMediaExtractor>(impl)
{
}
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index dcbbde2..4276bcd 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -33,7 +33,7 @@
class BpMediaExtractorService : public BpInterface<IMediaExtractorService>
{
public:
- BpMediaExtractorService(const sp<IBinder>& impl)
+ explicit BpMediaExtractorService(const sp<IBinder>& impl)
: BpInterface<IMediaExtractorService>(impl)
{
}
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index d2b4291..2d010d1 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -107,7 +107,7 @@
class BpMediaSource : public BpInterface<IMediaSource> {
public:
- BpMediaSource(const sp<IBinder>& impl)
+ explicit BpMediaSource(const sp<IBinder>& impl)
: BpInterface<IMediaSource>(impl)
{
}
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 1b3b3eb..62a7bdf 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -121,9 +121,11 @@
}
bool MediaCodecInfo::hasQuirk(const char *name) const {
- for (size_t ix = 0; ix < mQuirks.size(); ix++) {
- if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
- return true;
+ if (name) {
+ for (size_t ix = 0; ix < mQuirks.size(); ix++) {
+ if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
+ return true;
+ }
}
}
return false;
@@ -190,9 +192,11 @@
}
ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
- for (size_t ix = 0; ix < mCaps.size(); ix++) {
- if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
- return ix;
+ if (mime) {
+ for (size_t ix = 0; ix < mCaps.size(); ix++) {
+ if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+ return ix;
+ }
}
}
return -1;
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
similarity index 98%
rename from media/libstagefright/MediaDefs.cpp
rename to media/libmedia/MediaDefs.cpp
index 845462b..a2110c9 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libmedia/MediaDefs.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include <media/stagefright/MediaDefs.h>
+#include <media/MediaDefs.h>
namespace android {
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 3fffdc1a..db24b33 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -304,8 +304,9 @@
notifyVideoSizeChanged();
}
- uint32_t flags = FLAG_CAN_PAUSE;
+ uint32_t flags = 0;
if (mLiveSession->isSeekable()) {
+ flags |= FLAG_CAN_PAUSE;
flags |= FLAG_CAN_SEEK;
flags |= FLAG_CAN_SEEK_BACKWARD;
flags |= FLAG_CAN_SEEK_FORWARD;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 978d360..73b07bb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -72,37 +72,37 @@
if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
// 2 basic chars
- sprintf(tmp, "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
&& cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
// 1 special char
- sprintf(tmp, "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
// 1 Spanish/French char
- sprintf(tmp, "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
// 1 Portuguese/German/Danish char
- sprintf(tmp, "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x2f){
// Mid-Row Codes (Table 69)
- sprintf(tmp, "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
||
((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
&& cc->mData2 >= 0x21 && cc->mData2 <= 0x23)){
// Misc Control Codes (Table 70)
- sprintf(tmp, "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 & 0x70) == 0x10
&& (cc->mData2 & 0x40) == 0x40
&& ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20)) ) {
// Preamble Address Codes (Table 71)
- sprintf(tmp, "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else {
- sprintf(tmp, "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
}
if (out.size() > 0) {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6382ac6..0282303 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1608,11 +1608,11 @@
status_t ACodec::setComponentRole(
bool isEncoder, const char *mime) {
- const char *role = getComponentRole(isEncoder, mime);
+ const char *role = GetComponentRole(isEncoder, mime);
if (role == NULL) {
return BAD_VALUE;
}
- status_t err = setComponentRole(mOMX, mNode, role);
+ status_t err = SetComponentRole(mOMX, mNode, role);
if (err != OK) {
ALOGW("[%s] Failed to set standard component role '%s'.",
mComponentName.c_str(), role);
@@ -1620,98 +1620,6 @@
return err;
}
-//static
-const char *ACodec::getComponentRole(
- bool isEncoder, const char *mime) {
- struct MimeToRole {
- const char *mime;
- const char *decoderRole;
- const char *encoderRole;
- };
-
- static const MimeToRole kMimeToRole[] = {
- { MEDIA_MIMETYPE_AUDIO_MPEG,
- "audio_decoder.mp3", "audio_encoder.mp3" },
- { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
- "audio_decoder.mp1", "audio_encoder.mp1" },
- { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
- "audio_decoder.mp2", "audio_encoder.mp2" },
- { MEDIA_MIMETYPE_AUDIO_AMR_NB,
- "audio_decoder.amrnb", "audio_encoder.amrnb" },
- { MEDIA_MIMETYPE_AUDIO_AMR_WB,
- "audio_decoder.amrwb", "audio_encoder.amrwb" },
- { MEDIA_MIMETYPE_AUDIO_AAC,
- "audio_decoder.aac", "audio_encoder.aac" },
- { MEDIA_MIMETYPE_AUDIO_VORBIS,
- "audio_decoder.vorbis", "audio_encoder.vorbis" },
- { MEDIA_MIMETYPE_AUDIO_OPUS,
- "audio_decoder.opus", "audio_encoder.opus" },
- { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
- "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
- { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
- "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
- { MEDIA_MIMETYPE_VIDEO_AVC,
- "video_decoder.avc", "video_encoder.avc" },
- { MEDIA_MIMETYPE_VIDEO_HEVC,
- "video_decoder.hevc", "video_encoder.hevc" },
- { MEDIA_MIMETYPE_VIDEO_MPEG4,
- "video_decoder.mpeg4", "video_encoder.mpeg4" },
- { MEDIA_MIMETYPE_VIDEO_H263,
- "video_decoder.h263", "video_encoder.h263" },
- { MEDIA_MIMETYPE_VIDEO_VP8,
- "video_decoder.vp8", "video_encoder.vp8" },
- { MEDIA_MIMETYPE_VIDEO_VP9,
- "video_decoder.vp9", "video_encoder.vp9" },
- { MEDIA_MIMETYPE_AUDIO_RAW,
- "audio_decoder.raw", "audio_encoder.raw" },
- { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
- "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
- { MEDIA_MIMETYPE_AUDIO_FLAC,
- "audio_decoder.flac", "audio_encoder.flac" },
- { MEDIA_MIMETYPE_AUDIO_MSGSM,
- "audio_decoder.gsm", "audio_encoder.gsm" },
- { MEDIA_MIMETYPE_VIDEO_MPEG2,
- "video_decoder.mpeg2", "video_encoder.mpeg2" },
- { MEDIA_MIMETYPE_AUDIO_AC3,
- "audio_decoder.ac3", "audio_encoder.ac3" },
- { MEDIA_MIMETYPE_AUDIO_EAC3,
- "audio_decoder.eac3", "audio_encoder.eac3" },
- };
-
- static const size_t kNumMimeToRole =
- sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
-
- size_t i;
- for (i = 0; i < kNumMimeToRole; ++i) {
- if (!strcasecmp(mime, kMimeToRole[i].mime)) {
- break;
- }
- }
-
- if (i == kNumMimeToRole) {
- return NULL;
- }
-
- return isEncoder ? kMimeToRole[i].encoderRole
- : kMimeToRole[i].decoderRole;
-}
-
-//static
-status_t ACodec::setComponentRole(
- const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
- OMX_PARAM_COMPONENTROLETYPE roleParams;
- InitOMXParams(&roleParams);
-
- strncpy((char *)roleParams.cRole,
- role, OMX_MAX_STRINGNAME_SIZE - 1);
-
- roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
-
- return omx->setParameter(
- node, OMX_IndexParamStandardComponentRole,
- &roleParams, sizeof(roleParams));
-}
-
status_t ACodec::configureCodec(
const char *mime, const sp<AMessage> &msg) {
int32_t encoder;
@@ -2068,7 +1976,7 @@
}
ALOGD("[%s] Requested output format %#x and got %#x.",
mComponentName.c_str(), requestedColorFormat, colorFormat);
- if (!isFlexibleColorFormat(
+ if (!IsFlexibleColorFormat(
mOMX, mNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
|| flexibleEquivalent != (OMX_U32)requestedColorFormat) {
// device did not handle flex-YUV request for native window, fall back
@@ -3089,7 +2997,7 @@
// substitute back flexible color format to codec supported format
OMX_U32 flexibleEquivalent;
if (compressionFormat == OMX_VIDEO_CodingUnused
- && isFlexibleColorFormat(
+ && IsFlexibleColorFormat(
mOMX, mNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
&& colorFormat == flexibleEquivalent) {
ALOGI("[%s] using color format %#x in place of %#x",
@@ -3193,7 +3101,7 @@
// find best legacy non-standard format
OMX_U32 flexibleEquivalent;
if (legacyFormat.eColorFormat == OMX_COLOR_FormatUnused
- && isFlexibleColorFormat(
+ && IsFlexibleColorFormat(
mOMX, mNode, format.eColorFormat, false /* usingNativeBuffers */,
&flexibleEquivalent)
&& flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible) {
@@ -4678,188 +4586,6 @@
}
}
-// static
-bool ACodec::describeDefaultColorFormat(DescribeColorFormat2Params &params) {
- MediaImage2 &image = params.sMediaImage;
- memset(&image, 0, sizeof(image));
-
- image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
- image.mNumPlanes = 0;
-
- const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
- image.mWidth = params.nFrameWidth;
- image.mHeight = params.nFrameHeight;
-
- // only supporting YUV420
- if (fmt != OMX_COLOR_FormatYUV420Planar &&
- fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
- fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
- fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
- fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
- ALOGW("do not know color format 0x%x = %d", fmt, fmt);
- return false;
- }
-
- // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
- if (params.nStride != 0 && params.nSliceHeight == 0) {
- ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
- params.nFrameHeight);
- params.nSliceHeight = params.nFrameHeight;
- }
-
- // we need stride and slice-height to be non-zero and sensible. These values were chosen to
- // prevent integer overflows further down the line, and do not indicate support for
- // 32kx32k video.
- if (params.nStride == 0 || params.nSliceHeight == 0
- || params.nStride > 32768 || params.nSliceHeight > 32768) {
- ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
- fmt, fmt, params.nStride, params.nSliceHeight);
- return false;
- }
-
- // set-up YUV format
- image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
- image.mNumPlanes = 3;
- image.mBitDepth = 8;
- image.mBitDepthAllocated = 8;
- image.mPlane[image.Y].mOffset = 0;
- image.mPlane[image.Y].mColInc = 1;
- image.mPlane[image.Y].mRowInc = params.nStride;
- image.mPlane[image.Y].mHorizSubsampling = 1;
- image.mPlane[image.Y].mVertSubsampling = 1;
-
- switch ((int)fmt) {
- case HAL_PIXEL_FORMAT_YV12:
- if (params.bUsingNativeBuffers) {
- size_t ystride = align(params.nStride, 16);
- size_t cstride = align(params.nStride / 2, 16);
- image.mPlane[image.Y].mRowInc = ystride;
-
- image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
- image.mPlane[image.V].mColInc = 1;
- image.mPlane[image.V].mRowInc = cstride;
- image.mPlane[image.V].mHorizSubsampling = 2;
- image.mPlane[image.V].mVertSubsampling = 2;
-
- image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
- + (cstride * params.nSliceHeight / 2);
- image.mPlane[image.U].mColInc = 1;
- image.mPlane[image.U].mRowInc = cstride;
- image.mPlane[image.U].mHorizSubsampling = 2;
- image.mPlane[image.U].mVertSubsampling = 2;
- break;
- } else {
- // fall through as YV12 is used for YUV420Planar by some codecs
- }
-
- case OMX_COLOR_FormatYUV420Planar:
- case OMX_COLOR_FormatYUV420PackedPlanar:
- image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
- image.mPlane[image.U].mColInc = 1;
- image.mPlane[image.U].mRowInc = params.nStride / 2;
- image.mPlane[image.U].mHorizSubsampling = 2;
- image.mPlane[image.U].mVertSubsampling = 2;
-
- image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
- + (params.nStride * params.nSliceHeight / 4);
- image.mPlane[image.V].mColInc = 1;
- image.mPlane[image.V].mRowInc = params.nStride / 2;
- image.mPlane[image.V].mHorizSubsampling = 2;
- image.mPlane[image.V].mVertSubsampling = 2;
- break;
-
- case OMX_COLOR_FormatYUV420SemiPlanar:
- // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
- case OMX_COLOR_FormatYUV420PackedSemiPlanar:
- // NV12
- image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
- image.mPlane[image.U].mColInc = 2;
- image.mPlane[image.U].mRowInc = params.nStride;
- image.mPlane[image.U].mHorizSubsampling = 2;
- image.mPlane[image.U].mVertSubsampling = 2;
-
- image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
- image.mPlane[image.V].mColInc = 2;
- image.mPlane[image.V].mRowInc = params.nStride;
- image.mPlane[image.V].mHorizSubsampling = 2;
- image.mPlane[image.V].mVertSubsampling = 2;
- break;
-
- default:
- TRESPASS();
- }
- return true;
-}
-
-// static
-bool ACodec::describeColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
- DescribeColorFormat2Params &describeParams)
-{
- OMX_INDEXTYPE describeColorFormatIndex;
- if (omx->getExtensionIndex(
- node, "OMX.google.android.index.describeColorFormat",
- &describeColorFormatIndex) == OK) {
- DescribeColorFormatParams describeParamsV1(describeParams);
- if (omx->getParameter(
- node, describeColorFormatIndex,
- &describeParamsV1, sizeof(describeParamsV1)) == OK) {
- describeParams.initFromV1(describeParamsV1);
- return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
- }
- } else if (omx->getExtensionIndex(
- node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
- && omx->getParameter(
- node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
- return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
- }
-
- return describeDefaultColorFormat(describeParams);
-}
-
-// static
-bool ACodec::isFlexibleColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
- uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
- DescribeColorFormat2Params describeParams;
- InitOMXParams(&describeParams);
- describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
- // reasonable dummy values
- describeParams.nFrameWidth = 128;
- describeParams.nFrameHeight = 128;
- describeParams.nStride = 128;
- describeParams.nSliceHeight = 128;
- describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
-
- CHECK(flexibleEquivalent != NULL);
-
- if (!describeColorFormat(omx, node, describeParams)) {
- return false;
- }
-
- const MediaImage2 &img = describeParams.sMediaImage;
- if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
- if (img.mNumPlanes != 3
- || img.mPlane[img.Y].mHorizSubsampling != 1
- || img.mPlane[img.Y].mVertSubsampling != 1) {
- return false;
- }
-
- // YUV 420
- if (img.mPlane[img.U].mHorizSubsampling == 2
- && img.mPlane[img.U].mVertSubsampling == 2
- && img.mPlane[img.V].mHorizSubsampling == 2
- && img.mPlane[img.V].mVertSubsampling == 2) {
- // possible flexible YUV420 format
- if (img.mBitDepth <= 8) {
- *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
- return true;
- }
- }
- }
- return false;
-}
-
status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> ¬ify) {
const char *niceIndex = portIndex == kPortIndexInput ? "input" : "output";
OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -4900,7 +4626,7 @@
describeParams.nSliceHeight = videoDef->nSliceHeight;
describeParams.bUsingNativeBuffers = OMX_FALSE;
- if (describeColorFormat(mOMX, mNode, describeParams)) {
+ if (DescribeColorFormat(mOMX, mNode, describeParams)) {
notify->setBuffer(
"image-data",
ABuffer::CreateAsCopy(
@@ -8004,7 +7730,7 @@
const AString &name, const AString &mime, bool isEncoder,
sp<MediaCodecInfo::Capabilities> *caps) {
(*caps).clear();
- const char *role = getComponentRole(isEncoder, mime.c_str());
+ const char *role = GetComponentRole(isEncoder, mime.c_str());
if (role == NULL) {
return BAD_VALUE;
}
@@ -8025,7 +7751,7 @@
return err;
}
- err = setComponentRole(omx, node, role);
+ err = SetComponentRole(omx, node, role);
if (err != OK) {
omx->freeNode(node);
client.disconnect();
@@ -8074,7 +7800,7 @@
}
OMX_U32 flexibleEquivalent;
- if (isFlexibleColorFormat(
+ if (IsFlexibleColorFormat(
omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
&flexibleEquivalent)) {
bool marked = false;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 98bcc56..b07d914 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -35,7 +35,6 @@
MediaCodecList.cpp \
MediaCodecListOverrides.cpp \
MediaCodecSource.cpp \
- MediaDefs.cpp \
MediaExtractor.cpp \
MediaSync.cpp \
MidiExtractor.cpp \
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index b55e49c..a8cbc12 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -807,6 +807,10 @@
ALOGE("b/23540914");
return ERROR_MALFORMED;
}
+ if (depth > 100) {
+ ALOGE("b/27456299");
+ return ERROR_MALFORMED;
+ }
uint32_t hdr[2];
if (mDataSource->readAt(*offset, hdr, 8) < 8) {
return ERROR_IO;
@@ -2239,6 +2243,12 @@
return UNKNOWN_ERROR; // stop parsing after sidx
}
+ case FOURCC('a', 'c', '-', '3'):
+ {
+ *offset += chunk_size;
+ return parseAC3SampleEntry(data_offset);
+ }
+
default:
{
// check if we're parsing 'ilst' for meta keys
@@ -2255,6 +2265,99 @@
return OK;
}
+status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+ // skip 16 bytes:
+ // + 6-byte reserved,
+ // + 2-byte data reference index,
+ // + 8-byte reserved
+ offset += 16;
+ uint16_t channelCount;
+ if (!mDataSource->getUInt16(offset, &channelCount)) {
+ return ERROR_MALFORMED;
+ }
+ // skip 8 bytes:
+ // + 2-byte channelCount,
+ // + 2-byte sample size,
+ // + 4-byte reserved
+ offset += 8;
+ uint16_t sampleRate;
+ if (!mDataSource->getUInt16(offset, &sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+ return ERROR_MALFORMED;
+ }
+
+ // skip 4 bytes:
+ // + 2-byte sampleRate,
+ // + 2-byte reserved
+ offset += 4;
+ return parseAC3SpecificBox(offset, sampleRate);
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(
+ off64_t offset, uint16_t sampleRate) {
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC3SpecificBoxSize = 11;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ const uint32_t kAC3SpecificBoxPayloadSize = 3;
+ uint8_t chunk[kAC3SpecificBoxPayloadSize];
+ if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+
+ ABitReader br(chunk, sizeof(chunk));
+ static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+ static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+ unsigned fscod = br.getBits(2);
+ if (fscod == 3) {
+ ALOGE("Incorrect fscod (3) in AC3 header");
+ return ERROR_MALFORMED;
+ }
+ unsigned boxSampleRate = sampleRateTable[fscod];
+ if (boxSampleRate != sampleRate) {
+ ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+ boxSampleRate, sampleRate);
+ return ERROR_MALFORMED;
+ }
+
+ unsigned bsid = br.getBits(5);
+ if (bsid > 8) {
+ ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+ return ERROR_MALFORMED;
+ }
+
+ // skip
+ unsigned bsmod __unused = br.getBits(3);
+
+ unsigned acmod = br.getBits(3);
+ unsigned lfeon = br.getBits(1);
+ unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+ mLastTrack->meta->setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
+ return OK;
+}
+
status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
ALOGV("MPEG4Extractor::parseSegmentIndex");
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 92ce88c..f2fdbc9 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -94,7 +94,7 @@
sp<IMemory> mMemory;
sp<DataSource> mSource;
String8 mName;
- RemoteDataSource(const sp<DataSource> &source);
+ explicit RemoteDataSource(const sp<DataSource> &source);
DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
};
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 1c76ad7..72241dc 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1505,6 +1505,7 @@
{ MEDIA_MIMETYPE_AUDIO_AAC, AUDIO_FORMAT_AAC },
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
+ { MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3},
{ 0, AUDIO_FORMAT_INVALID }
};
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1c5e3c6..9fbdb72 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -62,6 +62,7 @@
OMX_AUDIO_AACObjectHE_PS,
OMX_AUDIO_AACObjectLD,
OMX_AUDIO_AACObjectELD,
+ OMX_AUDIO_AACObjectER_Scalable,
};
SoftAAC2::SoftAAC2(
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 8ff2f35..2763c35 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -60,7 +60,7 @@
libstagefright_avcenc
LOCAL_SHARED_LIBRARIES := \
- libstagefright \
+ libmedia \
libstagefright_avc_common \
libstagefright_enc_common \
libstagefright_foundation \
diff --git a/media/libstagefright/codecs/avcdec/Android.mk b/media/libstagefright/codecs/avcdec/Android.mk
index ef0dbfd..aa7cb90 100644
--- a/media/libstagefright/codecs/avcdec/Android.mk
+++ b/media/libstagefright/codecs/avcdec/Android.mk
@@ -14,7 +14,7 @@
LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_SHARED_LIBRARIES := libstagefright
+LOCAL_SHARED_LIBRARIES := libmedia
LOCAL_SHARED_LIBRARIES += libstagefright_omx
LOCAL_SHARED_LIBRARIES += libstagefright_foundation
LOCAL_SHARED_LIBRARIES += libutils
diff --git a/media/libstagefright/codecs/avcenc/Android.mk b/media/libstagefright/codecs/avcenc/Android.mk
index 70e531b..30e08e8 100644
--- a/media/libstagefright/codecs/avcenc/Android.mk
+++ b/media/libstagefright/codecs/avcenc/Android.mk
@@ -12,12 +12,10 @@
LOCAL_C_INCLUDES := $(TOP)/external/libavc/encoder
LOCAL_C_INCLUDES += $(TOP)/external/libavc/common
LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
-LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/hardware
LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_SHARED_LIBRARIES := libstagefright
+LOCAL_SHARED_LIBRARIES := libmedia
LOCAL_SHARED_LIBRARIES += libstagefright_omx
LOCAL_SHARED_LIBRARIES += libstagefright_foundation
LOCAL_SHARED_LIBRARIES += libutils
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index 7e6e015..1ae1052 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -14,7 +14,7 @@
LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_STATIC_LIBRARIES := \
libFLAC \
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index b36c99d..96ddd47 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -9,7 +9,7 @@
frameworks/native/include/media/openmax
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_MODULE := libstagefright_soft_g711dec
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index fe8c830..a8a4d1e 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -14,7 +14,7 @@
LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_STATIC_LIBRARIES := \
libgsm
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
index 78c4637..5f05a7b 100644
--- a/media/libstagefright/codecs/hevcdec/Android.mk
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -16,7 +16,7 @@
LOCAL_CLANG := true
LOCAL_SANITIZE := signed-integer-overflow
-LOCAL_SHARED_LIBRARIES := libstagefright
+LOCAL_SHARED_LIBRARIES := libmedia
LOCAL_SHARED_LIBRARIES += libstagefright_omx
LOCAL_SHARED_LIBRARIES += libstagefright_foundation
LOCAL_SHARED_LIBRARIES += libutils
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index eb39b44..197495e 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -71,7 +71,7 @@
libstagefright_m4vh263dec
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_MODULE := libstagefright_soft_mpeg4dec
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index ab079e8..c601af3 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -64,7 +64,7 @@
libstagefright_m4vh263enc
LOCAL_SHARED_LIBRARIES := \
- libstagefright \
+ libmedia \
libstagefright_enc_common \
libstagefright_foundation \
libstagefright_omx \
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 11581c1..8422b62 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -81,7 +81,7 @@
LOCAL_SANITIZE := signed-integer-overflow
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_STATIC_LIBRARIES := \
libstagefright_mp3dec
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.mk b/media/libstagefright/codecs/mpeg2dec/Android.mk
index f1c1719..e7a2ea1 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.mk
+++ b/media/libstagefright/codecs/mpeg2dec/Android.mk
@@ -14,7 +14,7 @@
LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_SHARED_LIBRARIES := libstagefright
+LOCAL_SHARED_LIBRARIES := libmedia
LOCAL_SHARED_LIBRARIES += libstagefright_omx
LOCAL_SHARED_LIBRARIES += libstagefright_foundation
LOCAL_SHARED_LIBRARIES += libutils
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 76f7600..1aa63dd 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -15,7 +15,7 @@
libvpx
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog
LOCAL_MODULE := libstagefright_soft_vpxdec
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 1de318a..a165fff 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -2,7 +2,9 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- SoftVPXEncoder.cpp
+ SoftVPXEncoder.cpp \
+ SoftVP8Encoder.cpp \
+ SoftVP9Encoder.cpp
LOCAL_C_INCLUDES := \
$(TOP)/external/libvpx/libvpx \
@@ -18,7 +20,7 @@
libvpx
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog \
libhardware \
LOCAL_MODULE := libstagefright_soft_vpxenc
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
new file mode 100644
index 0000000..04737a9
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP8Encoder"
+#include "SoftVP8Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+#ifndef INT32_MAX
+#define INT32_MAX 2147483647
+#endif
+
+namespace android {
+
+static const CodecProfileLevel kVp8ProfileLevels[] = {
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
+};
+
+SoftVP8Encoder::SoftVP8Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SoftVPXEncoder(
+ name, callbacks, appData, component, "video_encoder.vp8",
+ OMX_VIDEO_CodingVP8, MEDIA_MIMETYPE_VIDEO_VP8, 2,
+ kVp8ProfileLevels, NELEM(kVp8ProfileLevels)),
+ mDCTPartitions(0),
+ mLevel(OMX_VIDEO_VP8Level_Version0) {
+}
+
+void SoftVP8Encoder::setCodecSpecificInterface() {
+ mCodecInterface = vpx_codec_vp8_cx();
+}
+
+void SoftVP8Encoder::setCodecSpecificConfiguration() {
+ switch (mLevel) {
+ case OMX_VIDEO_VP8Level_Version0:
+ mCodecConfiguration->g_profile = 0;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version1:
+ mCodecConfiguration->g_profile = 1;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version2:
+ mCodecConfiguration->g_profile = 2;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version3:
+ mCodecConfiguration->g_profile = 3;
+ break;
+
+ default:
+ mCodecConfiguration->g_profile = 0;
+ }
+}
+
+vpx_codec_err_t SoftVP8Encoder::setCodecSpecificControls() {
+ vpx_codec_err_t codec_return = vpx_codec_control(mCodecContext,
+ VP8E_SET_TOKEN_PARTITIONS,
+ mDCTPartitions);
+ if (codec_return != VPX_CODEC_OK) {
+ ALOGE("Error setting dct partitions for vpx encoder.");
+ }
+ return codec_return;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetParameter(OMX_INDEXTYPE index,
+ OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp8:
+ return internalGetVp8Params(
+ (OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalGetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetParameter(OMX_INDEXTYPE index,
+ const OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp8:
+ return internalSetVp8Params(
+ (const OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalSetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetVp8Params(
+ OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+ if (vp8Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
+ vp8Params->eLevel = mLevel;
+ vp8Params->bErrorResilientMode = mErrorResilience;
+ vp8Params->nDCTPartitions = mDCTPartitions;
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetVp8Params(
+ const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+ if (vp8Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
+ mLevel = vp8Params->eLevel;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+
+ mErrorResilience = vp8Params->bErrorResilientMode;
+ if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
+ mDCTPartitions = vp8Params->nDCTPartitions;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+ return OMX_ErrorNone;
+}
+
+} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
new file mode 100644
index 0000000..b4904bf
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP8_ENCODER_H_
+
+#define SOFT_VP8_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a vp8 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings
+// are available:
+// - token partitioning
+struct SoftVP8Encoder : public SoftVPXEncoder {
+ SoftVP8Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ // Returns current values for requested OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param);
+
+ // Validates, extracts and stores relevant OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param);
+
+ // Populates |mCodecInterface| with codec specific settings.
+ virtual void setCodecSpecificInterface();
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration();
+
+ // Initializes codec specific encoder settings.
+ virtual vpx_codec_err_t setCodecSpecificControls();
+
+ // Gets vp8 specific parameters.
+ OMX_ERRORTYPE internalGetVp8Params(
+ OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+ // Handles vp8 specific parameters.
+ OMX_ERRORTYPE internalSetVp8Params(
+ const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+private:
+ // Max value supported for DCT partitions
+ static const uint32_t kMaxDCTPartitions = 3;
+
+ // vp8 specific configuration parameter
+ // that enables token partitioning of
+ // the stream into substreams
+ int32_t mDCTPartitions;
+
+ // Encoder profile corresponding to OMX level parameter
+ //
+    // The naming inconsistency comes from the OMX spec,
+    // which refers to vpx profiles (g_profile) as
+    // "levels" while using the name "profile" for
+    // something else.
+ OMX_VIDEO_VP8LEVELTYPE mLevel;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftVP8Encoder);
+};
+
+} // namespace android
+
+#endif // SOFT_VP8_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
new file mode 100644
index 0000000..4c7290d
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP9Encoder"
+#include "SoftVP9Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+static const CodecProfileLevel kVp9ProfileLevels[] = {
+ { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level41 },
+};
+
+SoftVP9Encoder::SoftVP9Encoder(
+ const char *name, const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SoftVPXEncoder(
+ name, callbacks, appData, component, "video_encoder.vp9",
+ OMX_VIDEO_CodingVP9, MEDIA_MIMETYPE_VIDEO_VP9, 4,
+ kVp9ProfileLevels, NELEM(kVp9ProfileLevels)),
+ mLevel(OMX_VIDEO_VP9Level1),
+ mTileColumns(0),
+ mFrameParallelDecoding(OMX_FALSE) {
+}
+
+void SoftVP9Encoder::setCodecSpecificInterface() {
+ mCodecInterface = vpx_codec_vp9_cx();
+}
+
+void SoftVP9Encoder::setCodecSpecificConfiguration() {
+ mCodecConfiguration->g_profile = 0;
+}
+
+vpx_codec_err_t SoftVP9Encoder::setCodecSpecificControls() {
+ vpx_codec_err_t codecReturn = vpx_codec_control(
+ mCodecContext, VP9E_SET_TILE_COLUMNS, mTileColumns);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP9E_SET_TILE_COLUMNS to %d. vpx_codec_control() "
+ "returned %d", mTileColumns, codecReturn);
+ return codecReturn;
+ }
+ codecReturn = vpx_codec_control(
+ mCodecContext, VP9E_SET_FRAME_PARALLEL_DECODING,
+ mFrameParallelDecoding);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP9E_SET_FRAME_PARALLEL_DECODING to %d."
+ "vpx_codec_control() returned %d", mFrameParallelDecoding,
+ codecReturn);
+ return codecReturn;
+ }
+ // For VP9, we always set CPU_USED to 8 (because the realtime default is 0
+ // which is too slow).
+ codecReturn = vpx_codec_control(mCodecContext, VP8E_SET_CPUUSED, 8);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP8E_SET_CPUUSED to 8. vpx_codec_control() "
+ "returned %d", codecReturn);
+ return codecReturn;
+ }
+ return codecReturn;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp9:
+ return internalGetVp9Params(
+ (OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalGetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp9:
+ return internalSetVp9Params(
+ (const OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalSetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetVp9Params(
+ OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+ if (vp9Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp9Params->eProfile = OMX_VIDEO_VP9Profile0;
+ vp9Params->eLevel = mLevel;
+ vp9Params->bErrorResilientMode = mErrorResilience;
+ vp9Params->nTileColumns = mTileColumns;
+ vp9Params->bEnableFrameParallelDecoding = mFrameParallelDecoding;
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetVp9Params(
+ const OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+ if (vp9Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ if (vp9Params->eProfile != OMX_VIDEO_VP9Profile0) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (vp9Params->eLevel == OMX_VIDEO_VP9Level1 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level11 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level2 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level21 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level3 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level31 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level4 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level41 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level5 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level51 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level52 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level6 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level61 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level62) {
+ mLevel = vp9Params->eLevel;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+
+ mErrorResilience = vp9Params->bErrorResilientMode;
+ mTileColumns = vp9Params->nTileColumns;
+ mFrameParallelDecoding = vp9Params->bEnableFrameParallelDecoding;
+ return OMX_ErrorNone;
+}
+
+} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
new file mode 100644
index 0000000..85df69a
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP9_ENCODER_H_
+
+#define SOFT_VP9_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a VP9 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings
+// are available:
+// - tile rows
+// - tile columns
+// - frame parallel mode
+struct SoftVP9Encoder : public SoftVPXEncoder {
+ SoftVP9Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ // Returns current values for requested OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param);
+
+ // Validates, extracts and stores relevant OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param);
+
+ // Populates |mCodecInterface| with codec specific settings.
+ virtual void setCodecSpecificInterface();
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration();
+
+ // Initializes codec specific encoder settings.
+ virtual vpx_codec_err_t setCodecSpecificControls();
+
+ // Gets vp9 specific parameters.
+ OMX_ERRORTYPE internalGetVp9Params(
+ OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+ // Handles vp9 specific parameters.
+ OMX_ERRORTYPE internalSetVp9Params(
+ const OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+private:
+ // Encoder profile corresponding to OMX level parameter
+ //
+    // The inconsistency in the naming is caused by the
+    // OMX spec referring to vpx profiles (g_profile)
+    // as "levels" while using the name "profile" for
+    // something else.
+ OMX_VIDEO_VP9LEVELTYPE mLevel;
+
+ int32_t mTileColumns;
+
+ OMX_BOOL mFrameParallelDecoding;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftVP9Encoder);
+};
+
+} // namespace android
+
+#endif // SOFT_VP9_ENCODER_H_
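
The bodies of the three codec-specific hooks declared above live in SoftVP9Encoder.cpp and are not part of this hunk. A rough sketch of what they presumably look like, assuming libvpx's vpx_codec_vp9_cx() interface and the VP9E_SET_TILE_COLUMNS / VP9E_SET_FRAME_PARALLEL_DECODING controls from vpx/vp8cx.h:

    // Sketch, not the committed implementation.
    void SoftVP9Encoder::setCodecSpecificInterface() {
        mCodecInterface = vpx_codec_vp9_cx();    // select the VP9 algorithm interface
    }

    void SoftVP9Encoder::setCodecSpecificConfiguration() {
        // Only OMX_VIDEO_VP9Profile0 passes internalSetVp9Params(), so g_profile stays 0.
        mCodecConfiguration->g_profile = 0;
    }

    vpx_codec_err_t SoftVP9Encoder::setCodecSpecificControls() {
        vpx_codec_err_t ret = vpx_codec_control(
                mCodecContext, VP9E_SET_TILE_COLUMNS, mTileColumns);
        if (ret != VPX_CODEC_OK) {
            ALOGE("Error setting VP9 tile columns for vpx encoder.");
            return ret;
        }
        return vpx_codec_control(
                mCodecContext, VP9E_SET_FRAME_PARALLEL_DECODING,
                mFrameParallelDecoding == OMX_TRUE ? 1 : 0);
    }

The base class calls these hooks from initEncoder(), as shown in the SoftVPXEncoder.cpp diff below.
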
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5edfbb5..5609032 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -18,6 +18,9 @@
#define LOG_TAG "SoftVPXEncoder"
#include "SoftVPXEncoder.h"
+#include "SoftVP8Encoder.h"
+#include "SoftVP9Encoder.h"
+
#include <utils/Log.h>
#include <utils/misc.h>
@@ -42,7 +45,6 @@
params->nVersion.s.nStep = 0;
}
-
static int GetCPUCoreCount() {
int cpuCoreCount = 1;
#if defined(_SC_NPROCESSORS_ONLN)
@@ -55,30 +57,26 @@
return cpuCoreCount;
}
-static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
-};
-
SoftVPXEncoder::SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
- OMX_COMPONENTTYPE **component)
+ OMX_COMPONENTTYPE **component,
+ const char* role,
+ OMX_VIDEO_CODINGTYPE codingType,
+ const char* mimeType,
+ int32_t minCompressionRatio,
+ const CodecProfileLevel *profileLevels,
+ size_t numProfileLevels)
: SoftVideoEncoderOMXComponent(
- name, "video_encoder.vp8", OMX_VIDEO_CodingVP8,
- kProfileLevels, NELEM(kProfileLevels),
+ name, role, codingType, profileLevels, numProfileLevels,
176 /* width */, 144 /* height */,
callbacks, appData, component),
mCodecContext(NULL),
mCodecConfiguration(NULL),
mCodecInterface(NULL),
mBitrateUpdated(false),
- mBitrateControlMode(VPX_VBR), // variable bitrate
- mDCTPartitions(0),
+ mBitrateControlMode(VPX_VBR),
mErrorResilience(OMX_FALSE),
- mLevel(OMX_VIDEO_VP8Level_Version0),
mKeyFrameInterval(0),
mMinQuantizer(0),
mMaxQuantizer(0),
@@ -96,10 +94,9 @@
initPorts(
kNumBuffers, kNumBuffers, kMinOutputBufferSize,
- MEDIA_MIMETYPE_VIDEO_VP8, 2 /* minCompressionRatio */);
+ mimeType, minCompressionRatio);
}
-
SoftVPXEncoder::~SoftVPXEncoder() {
releaseEncoder();
}
@@ -108,18 +105,18 @@
vpx_codec_err_t codec_return;
status_t result = UNKNOWN_ERROR;
- mCodecInterface = vpx_codec_vp8_cx();
+ setCodecSpecificInterface();
if (mCodecInterface == NULL) {
goto CLEAN_UP;
}
- ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+ ALOGD("VPx: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
(uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
mMinQuantizer, mMaxQuantizer);
mCodecConfiguration = new vpx_codec_enc_cfg_t;
codec_return = vpx_codec_enc_config_default(mCodecInterface,
mCodecConfiguration,
- 0); // Codec specific flags
+ 0);
if (codec_return != VPX_CODEC_OK) {
ALOGE("Error populating default configuration for vpx encoder.");
@@ -131,27 +128,6 @@
mCodecConfiguration->g_threads = GetCPUCoreCount();
mCodecConfiguration->g_error_resilient = mErrorResilience;
- switch (mLevel) {
- case OMX_VIDEO_VP8Level_Version0:
- mCodecConfiguration->g_profile = 0;
- break;
-
- case OMX_VIDEO_VP8Level_Version1:
- mCodecConfiguration->g_profile = 1;
- break;
-
- case OMX_VIDEO_VP8Level_Version2:
- mCodecConfiguration->g_profile = 2;
- break;
-
- case OMX_VIDEO_VP8Level_Version3:
- mCodecConfiguration->g_profile = 3;
- break;
-
- default:
- mCodecConfiguration->g_profile = 0;
- }
-
// OMX timebase unit is microsecond
// g_timebase is in seconds (i.e. 1/1000000 seconds)
mCodecConfiguration->g_timebase.num = 1;
@@ -253,7 +229,6 @@
goto CLEAN_UP;
}
}
-
// Set bitrate values for each layer
for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
mCodecConfiguration->ts_target_bitrate[i] =
@@ -271,7 +246,7 @@
if (mMaxQuantizer > 0) {
mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
}
-
+ setCodecSpecificConfiguration();
mCodecContext = new vpx_codec_ctx_t;
codec_return = vpx_codec_enc_init(mCodecContext,
mCodecInterface,
@@ -283,14 +258,6 @@
goto CLEAN_UP;
}
- codec_return = vpx_codec_control(mCodecContext,
- VP8E_SET_TOKEN_PARTITIONS,
- mDCTPartitions);
- if (codec_return != VPX_CODEC_OK) {
- ALOGE("Error setting dct partitions for vpx encoder.");
- goto CLEAN_UP;
- }
-
// Extra CBR settings
if (mBitrateControlMode == VPX_CBR) {
codec_return = vpx_codec_control(mCodecContext,
@@ -318,6 +285,13 @@
}
}
+ codec_return = setCodecSpecificControls();
+
+ if (codec_return != VPX_CODEC_OK) {
+ // The codec specific method would have logged the error.
+ goto CLEAN_UP;
+ }
+
if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
free(mConversionBuffer);
mConversionBuffer = NULL;
@@ -338,7 +312,6 @@
return result;
}
-
status_t SoftVPXEncoder::releaseEncoder() {
if (mCodecContext != NULL) {
vpx_codec_destroy(mCodecContext);
@@ -362,7 +335,6 @@
return OK;
}
-
OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
OMX_PTR param) {
// can include extension index OMX_INDEXEXTTYPE
@@ -393,54 +365,15 @@
return OMX_ErrorNone;
}
- // VP8 specific parameters that use extension headers
- case OMX_IndexParamVideoVp8: {
- OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
- (OMX_VIDEO_PARAM_VP8TYPE *)param;
-
- if (!isValidOMXParam(vp8Params)) {
- return OMX_ErrorBadParameter;
- }
-
- if (vp8Params->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
-
- vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
- vp8Params->eLevel = mLevel;
- vp8Params->nDCTPartitions = mDCTPartitions;
- vp8Params->bErrorResilientMode = mErrorResilience;
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoAndroidVp8Encoder: {
- OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
- (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
-
- if (!isValidOMXParam(vp8AndroidParams)) {
- return OMX_ErrorBadParameter;
- }
-
- if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
-
- vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
- vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
- vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
- vp8AndroidParams->nMinQuantizer = mMinQuantizer;
- vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
- memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
- mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
- return OMX_ErrorNone;
- }
+ case OMX_IndexParamVideoAndroidVp8Encoder:
+ return internalGetAndroidVpxParams(
+ (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
default:
return SoftVideoEncoderOMXComponent::internalGetParameter(index, param);
}
}
-
OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
const OMX_PTR param) {
// can include extension index OMX_INDEXEXTTYPE
@@ -458,27 +391,9 @@
return internalSetBitrateParams(bitRate);
}
- case OMX_IndexParamVideoVp8: {
- const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
- (const OMX_VIDEO_PARAM_VP8TYPE*) param;
-
- if (!isValidOMXParam(vp8Params)) {
- return OMX_ErrorBadParameter;
- }
-
- return internalSetVp8Params(vp8Params);
- }
-
- case OMX_IndexParamVideoAndroidVp8Encoder: {
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
- (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
-
- if (!isValidOMXParam(vp8AndroidParams)) {
- return OMX_ErrorBadParameter;
- }
-
- return internalSetAndroidVp8Params(vp8AndroidParams);
- }
+ case OMX_IndexParamVideoAndroidVp8Encoder:
+ return internalSetAndroidVpxParams(
+ (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
@@ -530,77 +445,21 @@
}
}
-OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params(
- const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
- if (vp8Params->nPortIndex != kOutputPortIndex) {
+OMX_ERRORTYPE SoftVPXEncoder::internalGetBitrateParams(
+ OMX_VIDEO_PARAM_BITRATETYPE* bitrate) {
+ if (bitrate->nPortIndex != kOutputPortIndex) {
return OMX_ErrorUnsupportedIndex;
}
- if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
- return OMX_ErrorBadParameter;
- }
+ bitrate->nTargetBitrate = mBitrate;
- if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
- mLevel = vp8Params->eLevel;
+ if (mBitrateControlMode == VPX_VBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateVariable;
+ } else if (mBitrateControlMode == VPX_CBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateConstant;
} else {
- return OMX_ErrorBadParameter;
+ return OMX_ErrorUnsupportedSetting;
}
-
- if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
- mDCTPartitions = vp8Params->nDCTPartitions;
- } else {
- return OMX_ErrorBadParameter;
- }
-
- mErrorResilience = vp8Params->bErrorResilientMode;
- return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
- if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
- if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
- vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
- return OMX_ErrorBadParameter;
- }
- if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
- return OMX_ErrorBadParameter;
- }
- if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
- return OMX_ErrorBadParameter;
- }
-
- mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
- if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
- mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
- } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
- mTemporalLayers = 0;
- }
- // Check the bitrate distribution between layers is in increasing order
- if (mTemporalLayers > 1) {
- for (size_t i = 0; i < mTemporalLayers - 1; i++) {
- if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
- vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
- ALOGE("Wrong bitrate ratio - should be in increasing order.");
- return OMX_ErrorBadParameter;
- }
- }
- }
- mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
- mMinQuantizer = vp8AndroidParams->nMinQuantizer;
- mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
- memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
- sizeof(mTemporalLayerBitrateRatio));
- ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
- " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
- (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
- mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
- mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
return OMX_ErrorNone;
}
@@ -623,71 +482,134 @@
return OMX_ErrorNone;
}
+OMX_ERRORTYPE SoftVPXEncoder::internalGetAndroidVpxParams(
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+ if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vpxAndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+ vpxAndroidParams->eTemporalPattern = mTemporalPatternType;
+ vpxAndroidParams->nTemporalLayerCount = mTemporalLayers;
+ vpxAndroidParams->nMinQuantizer = mMinQuantizer;
+ vpxAndroidParams->nMaxQuantizer = mMaxQuantizer;
+ memcpy(vpxAndroidParams->nTemporalLayerBitrateRatio,
+ mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVpxParams(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+ if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+ if (vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+ vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vpxAndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vpxAndroidParams->nMinQuantizer > vpxAndroidParams->nMaxQuantizer) {
+ return OMX_ErrorBadParameter;
+ }
+
+ mTemporalPatternType = vpxAndroidParams->eTemporalPattern;
+ if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ mTemporalLayers = vpxAndroidParams->nTemporalLayerCount;
+ } else if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+ mTemporalLayers = 0;
+ }
+ // Check the bitrate distribution between layers is in increasing order
+ if (mTemporalLayers > 1) {
+ for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+ if (vpxAndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+ vpxAndroidParams->nTemporalLayerBitrateRatio[i]) {
+ ALOGE("Wrong bitrate ratio - should be in increasing order.");
+ return OMX_ErrorBadParameter;
+ }
+ }
+ }
+ mKeyFrameInterval = vpxAndroidParams->nKeyFrameInterval;
+ mMinQuantizer = vpxAndroidParams->nMinQuantizer;
+ mMaxQuantizer = vpxAndroidParams->nMaxQuantizer;
+ memcpy(mTemporalLayerBitrateRatio, vpxAndroidParams->nTemporalLayerBitrateRatio,
+ sizeof(mTemporalLayerBitrateRatio));
+ ALOGD("VPx: internalSetAndroidVpxParams. BRMode: %u. TS: %zu. KF: %u."
+ " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+ (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+ mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+ mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+ return OMX_ErrorNone;
+}
+
vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
vpx_enc_frame_flags_t flags = 0;
- int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
- mTemporalPatternIdx++;
- switch (mTemporalPattern[patternIdx]) {
- case kTemporalUpdateLast:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_REF_ARF;
- break;
- case kTemporalUpdateGoldenWithoutDependency:
- flags |= VP8_EFLAG_NO_REF_GF;
- // Deliberately no break here.
- case kTemporalUpdateGolden:
- flags |= VP8_EFLAG_NO_REF_ARF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateAltrefWithoutDependency:
- flags |= VP8_EFLAG_NO_REF_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- // Deliberately no break here.
- case kTemporalUpdateAltref:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateNoneNoRefAltref:
- flags |= VP8_EFLAG_NO_REF_ARF;
- // Deliberately no break here.
- case kTemporalUpdateNone:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- flags |= VP8_EFLAG_NO_UPD_ENTROPY;
- break;
- case kTemporalUpdateNoneNoRefGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- flags |= VP8_EFLAG_NO_UPD_ENTROPY;
- break;
- case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateLastRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- break;
- case kTemporalUpdateGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateLastAndGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- break;
- case kTemporalUpdateLastRefAll:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_GF;
- break;
+ if (mTemporalPatternLength > 0) {
+ int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+ mTemporalPatternIdx++;
+ switch (mTemporalPattern[patternIdx]) {
+ case kTemporalUpdateLast:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ break;
+ case kTemporalUpdateGoldenWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateGolden:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateAltrefWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateAltref:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateNoneNoRefAltref:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ // Deliberately no break here.
+ case kTemporalUpdateNone:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastAndGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateLastRefAll:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
}
return flags;
}
@@ -765,10 +687,7 @@
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
kInputBufferAlignment, (uint8_t *)source);
- vpx_enc_frame_flags_t flags = 0;
- if (mTemporalPatternLength > 0) {
- flags = getEncodeFlags();
- }
+ vpx_enc_frame_flags_t flags = getEncodeFlags();
if (mKeyFrameRequested) {
flags |= VPX_EFLAG_FORCE_KF;
mKeyFrameRequested = false;
@@ -779,7 +698,7 @@
vpx_codec_err_t res = vpx_codec_enc_config_set(mCodecContext,
mCodecConfiguration);
if (res != VPX_CODEC_OK) {
- ALOGE("vp8 encoder failed to update bitrate: %s",
+ ALOGE("vpx encoder failed to update bitrate: %s",
vpx_codec_err_to_string(res));
notify(OMX_EventError,
OMX_ErrorUndefined,
@@ -849,9 +768,15 @@
} // namespace android
-
android::SoftOMXComponent *createSoftOMXComponent(
const char *name, const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData, OMX_COMPONENTTYPE **component) {
- return new android::SoftVPXEncoder(name, callbacks, appData, component);
+ if (!strcmp(name, "OMX.google.vp8.encoder")) {
+ return new android::SoftVP8Encoder(name, callbacks, appData, component);
+ } else if (!strcmp(name, "OMX.google.vp9.encoder")) {
+ return new android::SoftVP9Encoder(name, callbacks, appData, component);
+ } else {
+ CHECK(!"Unknown component");
+ }
+ return NULL;
}
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index cd0a0cf..86e71da 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -31,18 +31,18 @@
namespace android {
-// Exposes a vpx encoder as an OMX Component
+// Base class for a VPX Encoder OMX Component
//
// Boilerplate for callback bindings are taken care
// by the base class SimpleSoftOMXComponent and its
// parent SoftOMXComponent.
//
-// Only following encoder settings are available
+// Only the following encoder settings are available (codec-specific settings might
+// be available in the sub-classes):
// - target bitrate
// - rate control (constant / variable)
// - frame rate
// - error resilience
-// - token partitioning
// - reconstruction & loop filters (g_profile)
//
// Only following color formats are recognized
@@ -54,7 +54,7 @@
// - encoding deadline is realtime
// - multithreaded encoding utilizes a number of threads equal
// to online cpu's available
-// - the algorithm interface for encoder is vp8
+// - the algorithm interface for encoder is decided by the sub-class in use
// - fractional bits of frame rate is discarded
// - OMX timestamps are in microseconds, therefore
// encoder timebase is fixed to 1/1000000
@@ -63,7 +63,13 @@
SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
- OMX_COMPONENTTYPE **component);
+ OMX_COMPONENTTYPE **component,
+ const char* role,
+ OMX_VIDEO_CODINGTYPE codingType,
+ const char* mimeType,
+ int32_t minCompressionRatio,
+ const CodecProfileLevel *profileLevels,
+ size_t numProfileLevels);
protected:
virtual ~SoftVPXEncoder();
@@ -87,7 +93,44 @@
// encoding of the frame
virtual void onQueueFilled(OMX_U32 portIndex);
-private:
+ // Initializes vpx encoder with available settings.
+ status_t initEncoder();
+
+ // Populates mCodecInterface with codec specific settings.
+ virtual void setCodecSpecificInterface() = 0;
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration() = 0;
+
+ // Sets codec specific encoder controls.
+ virtual vpx_codec_err_t setCodecSpecificControls() = 0;
+
+ // Get current encode flags.
+ virtual vpx_enc_frame_flags_t getEncodeFlags();
+
+    // Releases the vpx encoder instance, with its associated
+    // data structures.
+ //
+ // Unless called earlier, this is handled by the
+ // dtor.
+ status_t releaseEncoder();
+
+ // Get bitrate parameters.
+ virtual OMX_ERRORTYPE internalGetBitrateParams(
+ OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+ // Updates bitrate to reflect port settings.
+ virtual OMX_ERRORTYPE internalSetBitrateParams(
+ const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+ // Gets Android vpx specific parameters.
+ OMX_ERRORTYPE internalGetAndroidVpxParams(
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
+ // Handles Android vpx specific parameters.
+ OMX_ERRORTYPE internalSetAndroidVpxParams(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
enum TemporalReferences {
// For 1 layer case: reference all (last, golden, and alt ref), but only
// update last.
@@ -137,9 +180,6 @@
static const uint32_t kInputBufferAlignment = 1;
static const uint32_t kOutputBufferAlignment = 2;
- // Max value supported for DCT partitions
- static const uint32_t kMaxDCTPartitions = 3;
-
// Number of supported input color formats
static const uint32_t kNumberOfSupportedColorFormats = 3;
@@ -161,23 +201,10 @@
// Bitrate control mode, either constant or variable
vpx_rc_mode mBitrateControlMode;
- // vp8 specific configuration parameter
- // that enables token partitioning of
- // the stream into substreams
- int32_t mDCTPartitions;
-
// Parameter that denotes whether error resilience
// is enabled in encoder
OMX_BOOL mErrorResilience;
- // Encoder profile corresponding to OMX level parameter
- //
- // The inconsistency in the naming is caused by
- // OMX spec referring vpx profiles (g_profile)
- // as "levels" whereas using the name "profile" for
- // something else.
- OMX_VIDEO_VP8LEVELTYPE mLevel;
-
// Key frame interval in frames
uint32_t mKeyFrameInterval;
@@ -216,31 +243,6 @@
bool mKeyFrameRequested;
- // Initializes vpx encoder with available settings.
- status_t initEncoder();
-
- // Releases vpx encoder instance, with it's associated
- // data structures.
- //
- // Unless called earlier, this is handled by the
- // dtor.
- status_t releaseEncoder();
-
- // Get current encode flags
- vpx_enc_frame_flags_t getEncodeFlags();
-
- // Updates bitrate to reflect port settings.
- OMX_ERRORTYPE internalSetBitrateParams(
- const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
-
- // Handles vp8 specific parameters.
- OMX_ERRORTYPE internalSetVp8Params(
- const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
-
- // Handles Android vp8 specific parameters.
- OMX_ERRORTYPE internalSetAndroidVp8Params(
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
-
DISALLOW_EVIL_CONSTRUCTORS(SoftVPXEncoder);
};
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 7159674..9f2c055 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -99,7 +99,7 @@
LOCAL_SANITIZE := signed-integer-overflow
LOCAL_SHARED_LIBRARIES := \
- libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+ libmedia libstagefright_omx libstagefright_foundation libutils liblog \
LOCAL_MODULE := libstagefright_soft_h264dec
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
index f272763..894c4da 100644
--- a/media/libstagefright/codecs/opus/dec/Android.mk
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -10,7 +10,7 @@
frameworks/native/include/media/openmax \
LOCAL_SHARED_LIBRARIES := \
- libopus libstagefright libstagefright_omx \
+ libopus libmedia libstagefright_omx \
libstagefright_foundation libutils liblog
LOCAL_CLANG := true
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 039be6f..7af2993 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -10,7 +10,7 @@
frameworks/native/include/media/openmax \
LOCAL_SHARED_LIBRARIES := \
- libvorbisidec libstagefright libstagefright_omx \
+ libvorbisidec libmedia libstagefright_omx \
libstagefright_foundation libutils liblog
LOCAL_MODULE := libstagefright_soft_vorbisdec
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index b03c769..ce164a2 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -101,5 +101,12 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="bitrate-modes" value="VBR,CBR" />
</MediaCodec>
+ <MediaCodec name="OMX.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+        <!-- profiles and levels:  Profile0 : Level1-62 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 9ed53e7..647a9fd 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -100,7 +100,7 @@
}
}
-static void makeFourCC(uint32_t fourcc, char *s) {
+static void makeFourCC(uint32_t fourcc, char *s, size_t bufsz) {
s[0] = (fourcc >> 24) & 0xff;
if (s[0]) {
s[1] = (fourcc >> 16) & 0xff;
@@ -108,7 +108,7 @@
s[3] = fourcc & 0xff;
s[4] = 0;
} else {
- sprintf(s, "%u", fourcc);
+ snprintf(s, bufsz, "%u", fourcc);
}
}
@@ -146,7 +146,7 @@
if (verboseStats) {
for (size_t j = 0; j < handler->mMessages.size(); j++) {
char fourcc[15];
- makeFourCC(handler->mMessages.keyAt(j), fourcc);
+ makeFourCC(handler->mMessages.keyAt(j), fourcc, sizeof(fourcc));
s.appendFormat("\n %s: %u",
fourcc,
handler->mMessages.valueAt(j));
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b167543..04fac19 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#define LOG_TAG "AString"
+#include <utils/Log.h>
+
#include <ctype.h>
#include <stdarg.h>
#include <stdio.h>
@@ -40,14 +43,24 @@
: mData(NULL),
mSize(0),
mAllocSize(1) {
- setTo(s);
+ if (!s) {
+ ALOGW("ctor got NULL, using empty string instead");
+ clear();
+ } else {
+ setTo(s);
+ }
}
AString::AString(const char *s, size_t size)
: mData(NULL),
mSize(0),
mAllocSize(1) {
- setTo(s, size);
+ if (!s) {
+ ALOGW("ctor got NULL, using empty string instead");
+ clear();
+ } else {
+ setTo(s, size);
+ }
}
AString::AString(const String8 &from)
diff --git a/media/libstagefright/foundation/hexdump.cpp b/media/libstagefright/foundation/hexdump.cpp
index a44d832..872c5f3 100644
--- a/media/libstagefright/foundation/hexdump.cpp
+++ b/media/libstagefright/foundation/hexdump.cpp
@@ -49,7 +49,7 @@
appendIndent(&line, indent);
char tmp[32];
- sprintf(tmp, "%08lx: ", (unsigned long)offset);
+ snprintf(tmp, sizeof(tmp), "%08lx: ", (unsigned long)offset);
line.append(tmp);
@@ -60,7 +60,7 @@
if (offset + i >= size) {
line.append(" ");
} else {
- sprintf(tmp, "%02x ", data[offset + i]);
+ snprintf(tmp, sizeof(tmp), "%02x ", data[offset + i]);
line.append(tmp);
}
}
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 35691b9..1b5db35 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -514,7 +514,7 @@
if (mOffset == 126 || mOffset == 127) {
// Special treatment for the track number and genre.
char tmp[16];
- sprintf(tmp, "%d", (int)*frameData);
+ snprintf(tmp, sizeof(tmp), "%d", (int)*frameData);
id->setTo(tmp);
return;
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 18b14e1..a66ebc9 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -140,6 +140,9 @@
Track *findTrackByMimePrefix(const char *mimePrefix);
+ status_t parseAC3SampleEntry(off64_t offset);
+ status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+
MPEG4Extractor(const MPEG4Extractor &);
MPEG4Extractor &operator=(const MPEG4Extractor &);
};
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6132a2c..ac9b0c3 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -32,26 +32,23 @@
OMXMaster::OMXMaster()
: mVendorLibHandle(NULL) {
- mProcessName[0] = 0;
- if (mProcessName[0] == 0) {
- pid_t pid = getpid();
- char filename[20];
- snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
- int fd = open(filename, O_RDONLY);
- if (fd < 0) {
- ALOGW("couldn't determine process name");
- sprintf(mProcessName, "<unknown>");
- } else {
- ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
- if (len < 2) {
- ALOGW("couldn't determine process name");
- sprintf(mProcessName, "<unknown>");
- } else {
- // the name is newline terminated, so erase the newline
- mProcessName[len - 1] = 0;
- }
- close(fd);
- }
+ pid_t pid = getpid();
+ char filename[20];
+ snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
+ int fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ ALOGW("couldn't determine process name");
+ strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+ } else {
+ ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
+ if (len < 2) {
+ ALOGW("couldn't determine process name");
+ strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+ } else {
+ // the name is newline terminated, so erase the newline
+ mProcessName[len - 1] = 0;
+ }
+ close(fd);
}
addVendorPlugin();
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index 799696c..c5879b8 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -20,7 +20,10 @@
#include <string.h>
#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/MediaDefs.h>
#include "OMXUtils.h"
namespace android {
@@ -101,5 +104,273 @@
/**************************************************************************************************/
+const char *GetComponentRole(bool isEncoder, const char *mime) {
+ struct MimeToRole {
+ const char *mime;
+ const char *decoderRole;
+ const char *encoderRole;
+ };
+
+ static const MimeToRole kMimeToRole[] = {
+ { MEDIA_MIMETYPE_AUDIO_MPEG,
+ "audio_decoder.mp3", "audio_encoder.mp3" },
+ { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
+ "audio_decoder.mp1", "audio_encoder.mp1" },
+ { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+ "audio_decoder.mp2", "audio_encoder.mp2" },
+ { MEDIA_MIMETYPE_AUDIO_AMR_NB,
+ "audio_decoder.amrnb", "audio_encoder.amrnb" },
+ { MEDIA_MIMETYPE_AUDIO_AMR_WB,
+ "audio_decoder.amrwb", "audio_encoder.amrwb" },
+ { MEDIA_MIMETYPE_AUDIO_AAC,
+ "audio_decoder.aac", "audio_encoder.aac" },
+ { MEDIA_MIMETYPE_AUDIO_VORBIS,
+ "audio_decoder.vorbis", "audio_encoder.vorbis" },
+ { MEDIA_MIMETYPE_AUDIO_OPUS,
+ "audio_decoder.opus", "audio_encoder.opus" },
+ { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+ "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
+ { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+ "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
+ { MEDIA_MIMETYPE_VIDEO_AVC,
+ "video_decoder.avc", "video_encoder.avc" },
+ { MEDIA_MIMETYPE_VIDEO_HEVC,
+ "video_decoder.hevc", "video_encoder.hevc" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG4,
+ "video_decoder.mpeg4", "video_encoder.mpeg4" },
+ { MEDIA_MIMETYPE_VIDEO_H263,
+ "video_decoder.h263", "video_encoder.h263" },
+ { MEDIA_MIMETYPE_VIDEO_VP8,
+ "video_decoder.vp8", "video_encoder.vp8" },
+ { MEDIA_MIMETYPE_VIDEO_VP9,
+ "video_decoder.vp9", "video_encoder.vp9" },
+ { MEDIA_MIMETYPE_AUDIO_RAW,
+ "audio_decoder.raw", "audio_encoder.raw" },
+ { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+ "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
+ { MEDIA_MIMETYPE_AUDIO_FLAC,
+ "audio_decoder.flac", "audio_encoder.flac" },
+ { MEDIA_MIMETYPE_AUDIO_MSGSM,
+ "audio_decoder.gsm", "audio_encoder.gsm" },
+ { MEDIA_MIMETYPE_VIDEO_MPEG2,
+ "video_decoder.mpeg2", "video_encoder.mpeg2" },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", "audio_encoder.ac3" },
+ { MEDIA_MIMETYPE_AUDIO_EAC3,
+ "audio_decoder.eac3", "audio_encoder.eac3" },
+ };
+
+ static const size_t kNumMimeToRole =
+ sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
+
+ size_t i;
+ for (i = 0; i < kNumMimeToRole; ++i) {
+ if (!strcasecmp(mime, kMimeToRole[i].mime)) {
+ break;
+ }
+ }
+
+ if (i == kNumMimeToRole) {
+ return NULL;
+ }
+
+ return isEncoder ? kMimeToRole[i].encoderRole
+ : kMimeToRole[i].decoderRole;
+}
+
+status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
+ OMX_PARAM_COMPONENTROLETYPE roleParams;
+ InitOMXParams(&roleParams);
+
+ strncpy((char *)roleParams.cRole,
+ role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+ roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ return omx->setParameter(
+ node, OMX_IndexParamStandardComponentRole,
+ &roleParams, sizeof(roleParams));
+}
+
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &params) {
+ MediaImage2 &image = params.sMediaImage;
+ memset(&image, 0, sizeof(image));
+
+ image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ image.mNumPlanes = 0;
+
+ const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
+ image.mWidth = params.nFrameWidth;
+ image.mHeight = params.nFrameHeight;
+
+ // only supporting YUV420
+ if (fmt != OMX_COLOR_FormatYUV420Planar &&
+ fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
+ fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
+ fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
+ fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
+ ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+ return false;
+ }
+
+ // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
+ if (params.nStride != 0 && params.nSliceHeight == 0) {
+ ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
+ params.nFrameHeight);
+ params.nSliceHeight = params.nFrameHeight;
+ }
+
+ // we need stride and slice-height to be non-zero and sensible. These values were chosen to
+ // prevent integer overflows further down the line, and do not indicate support for
+ // 32kx32k video.
+ if (params.nStride == 0 || params.nSliceHeight == 0
+ || params.nStride > 32768 || params.nSliceHeight > 32768) {
+ ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+ fmt, fmt, params.nStride, params.nSliceHeight);
+ return false;
+ }
+
+ // set-up YUV format
+ image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
+ image.mNumPlanes = 3;
+ image.mBitDepth = 8;
+ image.mBitDepthAllocated = 8;
+ image.mPlane[image.Y].mOffset = 0;
+ image.mPlane[image.Y].mColInc = 1;
+ image.mPlane[image.Y].mRowInc = params.nStride;
+ image.mPlane[image.Y].mHorizSubsampling = 1;
+ image.mPlane[image.Y].mVertSubsampling = 1;
+
+ switch ((int)fmt) {
+ case HAL_PIXEL_FORMAT_YV12:
+ if (params.bUsingNativeBuffers) {
+ size_t ystride = align(params.nStride, 16);
+ size_t cstride = align(params.nStride / 2, 16);
+ image.mPlane[image.Y].mRowInc = ystride;
+
+ image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
+ image.mPlane[image.V].mColInc = 1;
+ image.mPlane[image.V].mRowInc = cstride;
+ image.mPlane[image.V].mHorizSubsampling = 2;
+ image.mPlane[image.V].mVertSubsampling = 2;
+
+ image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
+ + (cstride * params.nSliceHeight / 2);
+ image.mPlane[image.U].mColInc = 1;
+ image.mPlane[image.U].mRowInc = cstride;
+ image.mPlane[image.U].mHorizSubsampling = 2;
+ image.mPlane[image.U].mVertSubsampling = 2;
+ break;
+ } else {
+ // fall through as YV12 is used for YUV420Planar by some codecs
+ }
+
+ case OMX_COLOR_FormatYUV420Planar:
+ case OMX_COLOR_FormatYUV420PackedPlanar:
+ image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+ image.mPlane[image.U].mColInc = 1;
+ image.mPlane[image.U].mRowInc = params.nStride / 2;
+ image.mPlane[image.U].mHorizSubsampling = 2;
+ image.mPlane[image.U].mVertSubsampling = 2;
+
+ image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
+ + (params.nStride * params.nSliceHeight / 4);
+ image.mPlane[image.V].mColInc = 1;
+ image.mPlane[image.V].mRowInc = params.nStride / 2;
+ image.mPlane[image.V].mHorizSubsampling = 2;
+ image.mPlane[image.V].mVertSubsampling = 2;
+ break;
+
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+ // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
+ case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+ // NV12
+ image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+ image.mPlane[image.U].mColInc = 2;
+ image.mPlane[image.U].mRowInc = params.nStride;
+ image.mPlane[image.U].mHorizSubsampling = 2;
+ image.mPlane[image.U].mVertSubsampling = 2;
+
+ image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
+ image.mPlane[image.V].mColInc = 2;
+ image.mPlane[image.V].mRowInc = params.nStride;
+ image.mPlane[image.V].mHorizSubsampling = 2;
+ image.mPlane[image.V].mVertSubsampling = 2;
+ break;
+
+ default:
+ TRESPASS();
+ }
+ return true;
+}
+
+bool DescribeColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ DescribeColorFormat2Params &describeParams)
+{
+ OMX_INDEXTYPE describeColorFormatIndex;
+ if (omx->getExtensionIndex(
+ node, "OMX.google.android.index.describeColorFormat",
+ &describeColorFormatIndex) == OK) {
+ DescribeColorFormatParams describeParamsV1(describeParams);
+ if (omx->getParameter(
+ node, describeColorFormatIndex,
+ &describeParamsV1, sizeof(describeParamsV1)) == OK) {
+ describeParams.initFromV1(describeParamsV1);
+ return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ }
+ } else if (omx->getExtensionIndex(
+ node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+ && omx->getParameter(
+ node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+ return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ }
+
+ return DescribeDefaultColorFormat(describeParams);
+}
+
+// static
+bool IsFlexibleColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
+ DescribeColorFormat2Params describeParams;
+ InitOMXParams(&describeParams);
+ describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+ // reasonable dummy values
+ describeParams.nFrameWidth = 128;
+ describeParams.nFrameHeight = 128;
+ describeParams.nStride = 128;
+ describeParams.nSliceHeight = 128;
+ describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
+
+ CHECK(flexibleEquivalent != NULL);
+
+ if (!DescribeColorFormat(omx, node, describeParams)) {
+ return false;
+ }
+
+ const MediaImage2 &img = describeParams.sMediaImage;
+ if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+ if (img.mNumPlanes != 3
+ || img.mPlane[img.Y].mHorizSubsampling != 1
+ || img.mPlane[img.Y].mVertSubsampling != 1) {
+ return false;
+ }
+
+ // YUV 420
+ if (img.mPlane[img.U].mHorizSubsampling == 2
+ && img.mPlane[img.U].mVertSubsampling == 2
+ && img.mPlane[img.V].mHorizSubsampling == 2
+ && img.mPlane[img.V].mVertSubsampling == 2) {
+ // possible flexible YUV420 format
+ if (img.mBitDepth <= 8) {
+ *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace android
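
A short usage sketch for the helpers added above (again not part of the patch); it assumes an sp<IOMX> omx, an IOMX::node_id node and an OMX_U32 colorFormat are already available in the caller.

    // Resolve and apply the standard component role for a software VP9 encoder.
    const char *role = GetComponentRole(true /* isEncoder */, MEDIA_MIMETYPE_VIDEO_VP9);
    if (role == NULL || SetComponentRole(omx, node, role) != OK) {
        ALOGW("failed to set component role for %s", MEDIA_MIMETYPE_VIDEO_VP9);
    }

    // Check whether a (possibly vendor-specific) color format is a flexible YUV420 variant.
    OMX_U32 flexibleEquivalent = 0;
    if (IsFlexibleColorFormat(
            omx, node, colorFormat, false /* usingNativeBuffers */, &flexibleEquivalent)) {
        // colorFormat can be treated as OMX_COLOR_FormatYUV420Flexible
    }
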
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
index 0c5e537..315f118 100644
--- a/media/libstagefright/omx/OMXUtils.h
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -17,6 +17,8 @@
#ifndef OMX_UTILS_H_
#define OMX_UTILS_H_
+#include <media/IOMX.h>
+
/***** DO NOT USE THIS INCLUDE!!! INTERNAL ONLY!!! UNLESS YOU RESIDE IN media/libstagefright *****/
// OMXUtils contains omx-specific utility functions for stagefright/omx library
@@ -36,6 +38,19 @@
status_t StatusFromOMXError(OMX_ERRORTYPE err);
+const char *GetComponentRole(bool isEncoder, const char *mime);
+status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role);
+
+struct DescribeColorFormat2Params;
+
+bool IsFlexibleColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
+bool DescribeColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ DescribeColorFormat2Params &describeParams);
+
} // namespace android
#endif
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 60c1e2e..89c50c5 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -77,19 +77,34 @@
switch (index) {
case OMX_IndexParamPortDefinition:
{
- portIndex = ((OMX_PARAM_PORTDEFINITIONTYPE *)params)->nPortIndex;
+ const OMX_PARAM_PORTDEFINITIONTYPE *portDefs =
+ (const OMX_PARAM_PORTDEFINITIONTYPE *) params;
+ if (!isValidOMXParam(portDefs)) {
+ return false;
+ }
+ portIndex = portDefs->nPortIndex;
break;
}
case OMX_IndexParamAudioPcm:
{
- portIndex = ((OMX_AUDIO_PARAM_PCMMODETYPE *)params)->nPortIndex;
+ const OMX_AUDIO_PARAM_PCMMODETYPE *pcmMode =
+ (const OMX_AUDIO_PARAM_PCMMODETYPE *) params;
+ if (!isValidOMXParam(pcmMode)) {
+ return false;
+ }
+ portIndex = pcmMode->nPortIndex;
break;
}
case OMX_IndexParamAudioAac:
{
- portIndex = ((OMX_AUDIO_PARAM_AACPROFILETYPE *)params)->nPortIndex;
+ const OMX_AUDIO_PARAM_AACPROFILETYPE *aacMode =
+ (const OMX_AUDIO_PARAM_AACPROFILETYPE *) params;
+ if (!isValidOMXParam(aacMode)) {
+ return false;
+ }
+ portIndex = aacMode->nPortIndex;
break;
}
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 0f9c00c..a773ca2 100755
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -56,6 +56,7 @@
{ "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
{ "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
{ "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
+ { "OMX.google.vp9.encoder", "vpxenc", "video_encoder.vp9" },
{ "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
{ "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
{ "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 47573c3..350b370 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,7 +17,6 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ASessionDescription"
#include <utils/Log.h>
-#include <cutils/log.h>
#include "ASessionDescription.h"
@@ -212,7 +211,7 @@
*PT = x;
- char key[32];
+ char key[20];
snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
CHECK(findAttribute(index, key, desc));
@@ -229,11 +228,8 @@
*width = 0;
*height = 0;
- char key[33];
+ char key[20];
snprintf(key, sizeof(key), "a=framesize:%lu", PT);
- if (PT > 9999999) {
- android_errorWriteLog(0x534e4554, "25747670");
- }
AString value;
if (!findAttribute(index, key, &value)) {
return false;
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index 4be44cf..c034c13 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -100,7 +100,7 @@
bool sendObject(MtpObjectHandle handle, int size, int srcFD);
bool deleteObject(MtpObjectHandle handle);
MtpObjectHandle getParent(MtpObjectHandle handle);
- MtpObjectHandle getStorageID(MtpObjectHandle handle);
+ MtpStorageID getStorageID(MtpObjectHandle handle);
MtpObjectPropertyList* getObjectPropsSupported(MtpObjectFormat format);
diff --git a/media/mtp/MtpDeviceInfo.cpp b/media/mtp/MtpDeviceInfo.cpp
index 3e1dff7..3d5cb06 100644
--- a/media/mtp/MtpDeviceInfo.cpp
+++ b/media/mtp/MtpDeviceInfo.cpp
@@ -69,6 +69,7 @@
if (!packet.getString(string)) return false;
mVendorExtensionDesc = strdup((const char *)string);
+ if (!mVendorExtensionDesc) return false;
if (!packet.getUInt16(mFunctionalMode)) return false;
mOperations = packet.getAUInt16();
@@ -84,12 +85,16 @@
if (!packet.getString(string)) return false;
mManufacturer = strdup((const char *)string);
+ if (!mManufacturer) return false;
if (!packet.getString(string)) return false;
mModel = strdup((const char *)string);
+ if (!mModel) return false;
if (!packet.getString(string)) return false;
mVersion = strdup((const char *)string);
+ if (!mVersion) return false;
if (!packet.getString(string)) return false;
mSerial = strdup((const char *)string);
+ if (!mSerial) return false;
return true;
}
diff --git a/media/mtp/MtpObjectInfo.cpp b/media/mtp/MtpObjectInfo.cpp
index 0573104..43b745f 100644
--- a/media/mtp/MtpObjectInfo.cpp
+++ b/media/mtp/MtpObjectInfo.cpp
@@ -77,6 +77,7 @@
if (!packet.getString(string)) return false;
mName = strdup((const char *)string);
+ if (!mName) return false;
if (!packet.getString(string)) return false;
if (parseDateTime((const char*)string, time))
@@ -88,6 +89,7 @@
if (!packet.getString(string)) return false;
mKeywords = strdup((const char *)string);
+ if (!mKeywords) return false;
return true;
}
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index 35ecb4f..3dd4248 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -70,8 +70,8 @@
char* bufptr = buffer;
for (size_t i = 0; i < mPacketSize; i++) {
- sprintf(bufptr, "%02X ", mBuffer[i]);
- bufptr += strlen(bufptr);
+ bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer), "%02X ",
+ mBuffer[i]);
if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
ALOGV("%s", buffer);
bufptr = buffer;
diff --git a/media/mtp/MtpStorageInfo.cpp b/media/mtp/MtpStorageInfo.cpp
index 5d4ebbf..8801a38 100644
--- a/media/mtp/MtpStorageInfo.cpp
+++ b/media/mtp/MtpStorageInfo.cpp
@@ -58,8 +58,10 @@
if (!packet.getString(string)) return false;
mStorageDescription = strdup((const char *)string);
+ if (!mStorageDescription) return false;
if (!packet.getString(string)) return false;
mVolumeIdentifier = strdup((const char *)string);
+ if (!mVolumeIdentifier) return false;
return true;
}
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index e615700..79a3a5a 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -149,6 +149,15 @@
}
template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::reset()
+{
+ // clear resampler state
+ if (mState != nullptr) {
+ memset(mState, 0, mStateCount * sizeof(TI));
+ }
+}
+
+template<typename TC, typename TI, typename TO>
void AudioResamplerDyn<TC, TI, TO>::Constants::set(
int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
{
@@ -528,6 +537,9 @@
mBuffer.frameCount = inFrameCount;
provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
+ // We are either at the end of playback or in an underrun situation.
+ // Reset buffer to prevent pop noise at the next buffer.
+ mInBuffer.reset();
goto resample_exit;
}
inFrameCount -= mBuffer.frameCount;
diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h
index 3b1c381..a5ea821 100644
--- a/services/audioflinger/AudioResamplerDyn.h
+++ b/services/audioflinger/AudioResamplerDyn.h
@@ -96,6 +96,8 @@
inline void readAdvance(TI*& impulse, const int halfNumCoefs,
const TI* const in, const size_t inputIndex);
+ void reset();
+
private:
// tuning parameter guidelines: 2 <= multiple <= 8
static const int kStateSizeMultipleOfFilterLength = 4;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 5601bde..cfa3e1a 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -166,7 +166,7 @@
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
bool mIsInvalid; // non-resettable latch, set by invalidate()
- AudioTrackServerProxy* mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
audio_output_flags_t mFlags;
@@ -214,8 +214,8 @@
Vector < Buffer* > mBufferQueue;
AudioBufferProvider::Buffer mOutBuffer;
bool mActive;
- DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
- AudioTrackClientProxy* mClientProxy;
+ DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
+ sp<AudioTrackClientProxy> mClientProxy;
}; // end of OutputTrack
// playback track, used by PatchPanel
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3fdf6aa..aa59534 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -4283,7 +4283,7 @@
// read original volumes with volume control
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
- AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
@@ -4773,7 +4773,7 @@
} else {
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = mMasterVolume * typeVolume;
- AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
left = float_from_gain(gain_minifloat_unpack_left(vlr));
if (left > GAIN_FLOAT_UNITY) {
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 6b97246..7c48375 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -156,7 +156,7 @@
int mUid;
Vector < sp<SyncEvent> >mSyncEvents;
const bool mIsOut;
- ServerProxy* mServerProxy;
+ sp<ServerProxy> mServerProxy;
const int mId;
sp<NBAIO_Sink> mTeeSink;
sp<NBAIO_Source> mTeeSource;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index b387af3..450c46d 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -52,7 +52,7 @@
// TODO move to a common header (Also shared with AudioTrack.cpp)
#define NANOS_PER_SECOND 1000000000
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * NANOS_PER_SECOND + time.tv_nsec)
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
namespace android {
@@ -93,7 +93,6 @@
mFrameCount(frameCount),
mSessionId(sessionId),
mIsOut(isOut),
- mServerProxy(NULL),
mId(android_atomic_inc(&nextTrackId)),
mTerminated(false),
mType(type),
@@ -218,7 +217,7 @@
dumpTee(-1, mTeeSource, mId);
#endif
// delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
- delete mServerProxy;
+ mServerProxy.clear();
if (mCblk != NULL) {
if (mClient == 0) {
delete mCblk;
@@ -364,7 +363,6 @@
mFastIndex(-1),
mCachedVolume(1.0),
mIsInvalid(false),
- mAudioTrackServerProxy(NULL),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -1134,7 +1132,7 @@
sampleRate, format, channelMask, frameCount,
NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
TYPE_OUTPUT),
- mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
+ mActive(false), mSourceThread(sourceThread)
{
if (mCblk != NULL) {
@@ -1159,7 +1157,6 @@
AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
{
clearBufferQueue();
- delete mClientProxy;
// superclass destructor will now delete the server proxy and shared memory both refer to
}
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index a215b95..d987469 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -67,6 +67,16 @@
API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
} input_type_t;
+ enum {
+ API_INPUT_CONCURRENCY_NONE = 0,
+ API_INPUT_CONCURRENCY_CALL = (1 << 0), // Concurrency with a call
+ API_INPUT_CONCURRENCY_CAPTURE = (1 << 1), // Concurrency with another capture
+
+ API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+ };
+
+ typedef uint32_t concurrency_type__mask_t;
+
public:
virtual ~AudioPolicyInterface() {}
//
@@ -140,7 +150,8 @@
input_type_t *inputType) = 0;
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session) = 0;
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency) = 0;
// indicates to the audio policy manager that the input stops being used.
virtual status_t stopInput(audio_io_handle_t input,
audio_session_t session) = 0;
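
The new out-parameter lets startInput() report what is already capturing when a new session starts. The AudioPolicyService side of this change is not in this hunk, so the following is only a hedged sketch of a hypothetical caller checking the mask defined above; audioPolicyManager, input and session are assumptions.

    AudioPolicyInterface::concurrency_type__mask_t concurrency =
            AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
    status_t status = audioPolicyManager->startInput(input, session, &concurrency);
    if (status == NO_ERROR) {
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
            // capture starts while a call is active
        }
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
            // another capture session is already active on the same input
        }
    }
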
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 55ee91f..71d70de 100755
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,9 +33,9 @@
/**
* A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInput()
+ * active inputs in getActiveInputs()
*/
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
/**
@@ -109,6 +109,44 @@
((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
}
+/**
+ * Returns the priority of a given audio source for capture. The priority is used when more than one
+ * capture session is active on a given input stream to determine which session drives routing and
+ * effect configuration.
+ *
+ * @param[in] inputSource to consider. Valid sources are:
+ * - AUDIO_SOURCE_VOICE_COMMUNICATION
+ * - AUDIO_SOURCE_CAMCORDER
+ * - AUDIO_SOURCE_MIC
+ * - AUDIO_SOURCE_FM_TUNER
+ * - AUDIO_SOURCE_VOICE_RECOGNITION
+ * - AUDIO_SOURCE_HOTWORD
+ *
+ * @return the corresponding input source priority or 0 if priority is irrelevant for this source.
+ *   This happens when the specified source cannot share a given input stream (e.g. remote submix).
+ * The higher the value, the higher the priority.
+ */
+static inline int32_t source_priority(audio_source_t inputSource)
+{
+ switch (inputSource) {
+ case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ return 6;
+ case AUDIO_SOURCE_CAMCORDER:
+ return 5;
+ case AUDIO_SOURCE_MIC:
+ return 4;
+ case AUDIO_SOURCE_FM_TUNER:
+ return 3;
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ return 2;
+ case AUDIO_SOURCE_HOTWORD:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
/* Indicates if audio formats are equivalent when considering a match between
* audio HAL supported formats and client requested formats
*/
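
To make the intent concrete, here is a hypothetical helper (not part of the patch) showing the kind of selection source_priority() is meant to drive; the real logic presumably sits behind the getHighestPrioritySource() methods declared further down.

    // Hypothetical helper: pick the active source that should drive routing and effects.
    static audio_source_t pickDrivingSource(const Vector<audio_source_t> &activeSources) {
        audio_source_t driving = AUDIO_SOURCE_DEFAULT;
        int32_t bestPriority = 0;
        for (size_t i = 0; i < activeSources.size(); i++) {
            int32_t priority = source_priority(activeSources[i]);
            if (priority > bestPriority) {   // higher value wins
                bestPriority = priority;
                driving = activeSources[i];
            }
        }
        return driving;
    }
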
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 46309ed..fb3ae78 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -63,7 +63,9 @@
const sp<AudioSession>& audioSession);
status_t removeAudioSession(audio_session_t session);
sp<AudioSession> getAudioSession(audio_session_t session) const;
- AudioSessionCollection getActiveAudioSessions() const;
+ AudioSessionCollection getAudioSessions(bool activeOnly) const;
+ size_t getAudioSessionCount(bool activeOnly) const;
+ audio_source_t getHighestPrioritySource(bool activeOnly) const;
// implementation of AudioSessionInfoProvider
virtual audio_config_base_t getConfig() const;
@@ -100,7 +102,7 @@
* Only considers inputs from physical devices (e.g. main mic, headset mic) when
* ignoreVirtualInputs is true.
*/
- audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+ Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 211ec98..8130792 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -166,6 +166,10 @@
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const = 0;
virtual sp<AudioPort> getAudioPort() const = 0;
+ virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
+ return (other != 0) &&
+ (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
+ }
uint32_t mSamplingRate;
audio_format_t mFormat;
audio_channel_mask_t mChannelMask;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 388c25d..18fba25 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -91,8 +91,10 @@
uint32_t getOpenCount() const;
AudioSessionCollection getActiveSessions() const;
+ size_t getActiveSessionCount() const;
bool hasActiveSession() const;
bool isSourceActive(audio_source_t source) const;
+ audio_source_t getHighestPrioritySource(bool activeOnly) const;
// implementation of AudioSessionInfoUpdateListener
virtual void onSessionInfoUpdate() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 1612714..069c9e7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -67,19 +67,24 @@
};
struct StreamTraits
{
- typedef audio_stream_type_t Type;
- typedef Vector<Type> Collection;
+ typedef audio_stream_type_t Type;
+ typedef Vector<Type> Collection;
};
struct DeviceCategoryTraits
{
- typedef device_category Type;
- typedef Vector<Type> Collection;
+ typedef device_category Type;
+ typedef Vector<Type> Collection;
+};
+struct AudioModeTraits
+{
+ typedef audio_mode_t Type;
+ typedef Vector<Type> Collection;
};
template <typename T>
struct DefaultTraits
{
- typedef T Type;
- typedef Vector<Type> Collection;
+ typedef T Type;
+ typedef Vector<Type> Collection;
};
template <class Traits>
@@ -110,6 +115,8 @@
static uint32_t maskFromString(const std::string &str, const char *del = "|");
+ static void maskToString(uint32_t mask, std::string &str, const char *del = "|");
+
protected:
struct Table {
const char *literal;
@@ -117,7 +124,6 @@
};
static const Table mTable[];
- static const size_t mSize;
};
typedef TypeConverter<DeviceTraits> DeviceConverter;
@@ -130,6 +136,7 @@
typedef TypeConverter<GainModeTraits> GainModeConverter;
typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+typedef TypeConverter<AudioModeTraits> AudioModeConverter;
inline
static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 6dacaa4..1164607 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -132,6 +132,12 @@
return mSessions.isSourceActive(source);
}
+audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
+{
+
+ return mSessions.getHighestPrioritySource(activeOnly);
+}
+
bool AudioInputDescriptor::isSoundTrigger() const {
// sound trigger and non sound trigger sessions are not mixed
// on a given input
@@ -143,9 +149,22 @@
return mSessions.valueFor(session);
}
-AudioSessionCollection AudioInputDescriptor::getActiveAudioSessions() const
+AudioSessionCollection AudioInputDescriptor::getAudioSessions(bool activeOnly) const
{
- return mSessions.getActiveSessions();
+ if (activeOnly) {
+ return mSessions.getActiveSessions();
+ } else {
+ return mSessions;
+ }
+}
+
+size_t AudioInputDescriptor::getAudioSessionCount(bool activeOnly) const
+{
+ if (activeOnly) {
+ return mSessions.getActiveSessionCount();
+ } else {
+ return mSessions.size();
+ }
}
status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
@@ -234,17 +253,19 @@
return count;
}
-audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
{
+ Vector<sp <AudioInputDescriptor> > activeInputs;
+
for (size_t i = 0; i < size(); i++) {
const sp<AudioInputDescriptor> inputDescriptor = valueAt(i);
if ((inputDescriptor->isActive())
&& (!ignoreVirtualInputs ||
!is_virtual_input_device(inputDescriptor->mDevice))) {
- return keyAt(i);
+ activeInputs.add(inputDescriptor);
}
}
- return 0;
+ return activeInputs;
}
audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
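Since getActiveInput() used to return a single handle, existing callers change shape; a short sketch of the migration pattern (the same loop appears in AudioPolicyManager.cpp later in this patch):

    // before: audio_io_handle_t activeInput = mInputs.getActiveInput();
    //         if (activeInput != 0) { ... }
    // after:
    Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
    for (size_t i = 0; i < activeInputs.size(); i++) {
        const sp<AudioInputDescriptor>& activeDesc = activeInputs[i];
        // use activeDesc->mIoHandle wherever the old code used the single returned handle
    }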
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 79bbc54..b942b79 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -81,7 +81,7 @@
return sharesHwModuleWith(outputDesc->subOutput1()) ||
sharesHwModuleWith(outputDesc->subOutput2());
} else {
- return (getModuleHandle() == outputDesc->getModuleHandle());
+ return hasSameHwModuleAs(outputDesc);
}
}
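The new hasSameHwModuleAs() helper centralizes the null check that the raw getModuleHandle() comparison lacked; a brief sketch of the intended usage (mirrors the software bridge decision in AudioPolicyManager.cpp later in this patch):

    // sketch: compare HW modules through the shared AudioPortConfig helper
    if (srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc)) {
        // both ports live on the same audio HAL module; a HW patch may be possible
    } else {
        // different modules (or null peer): fall back to a software bridge
    }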
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index da983c5..3b63239 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <AudioPolicyInterface.h>
+#include "policy.h"
#include "AudioSession.h"
#include "AudioGain.h"
#include "TypeConverter.h"
@@ -214,9 +215,20 @@
return activeSessions;
}
+size_t AudioSessionCollection::getActiveSessionCount() const
+{
+ size_t activeCount = 0;
+ for (size_t i = 0; i < size(); i++) {
+ if (valueAt(i)->activeCount() != 0) {
+ activeCount++;
+ }
+ }
+ return activeCount;
+}
+
bool AudioSessionCollection::hasActiveSession() const
{
- return getActiveSessions().size() != 0;
+ return getActiveSessionCount() != 0;
}
bool AudioSessionCollection::isSourceActive(audio_source_t source) const
@@ -236,6 +248,25 @@
return false;
}
+audio_source_t AudioSessionCollection::getHighestPrioritySource(bool activeOnly) const
+{
+ audio_source_t source = AUDIO_SOURCE_DEFAULT;
+ int32_t priority = -1;
+
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioSession> audioSession = valueAt(i);
+ if (activeOnly && audioSession->activeCount() == 0) {
+ continue;
+ }
+ int32_t curPriority = source_priority(audioSession->inputSource());
+ if (curPriority > priority) {
+ priority = curPriority;
+ source = audioSession->inputSource();
+ }
+ }
+ return source;
+}
+
void AudioSessionCollection::onSessionInfoUpdate() const
{
for (size_t i = 0; i < size(); i++) {
@@ -243,7 +274,6 @@
}
}
-
status_t AudioSessionCollection::dump(int fd, int spaces) const
{
const size_t SIZE = 256;
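A small design note on the count helper above: hasActiveSession() previously built a full AudioSessionCollection just to test for emptiness; counting in place avoids the copy. A standalone sketch of the same idiom (illustrative types, not AOSP API):

    #include <map>

    struct Session { int activeCount = 0; };   // illustrative stand-in

    // count active sessions without materializing a filtered collection
    static size_t activeSessionCount(const std::map<int, Session>& sessions) {
        size_t n = 0;
        for (const auto& entry : sessions) {
            if (entry.second.activeCount != 0) {
                n++;
            }
        }
        return n;
    }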
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index abf2dd4..d74fb0a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -108,8 +108,18 @@
AudioPort::dump(fd, 4);
- snprintf(buffer, SIZE, " - flags: 0x%04x\n", getFlags());
+ snprintf(buffer, SIZE, " - flags: 0x%04x", getFlags());
result.append(buffer);
+ std::string flagsLiteral;
+ if (getRole() == AUDIO_PORT_ROLE_SINK) {
+ InputFlagConverter::maskToString(getFlags(), flagsLiteral);
+ } else if (getRole() == AUDIO_PORT_ROLE_SOURCE) {
+ OutputFlagConverter::maskToString(getFlags(), flagsLiteral);
+ }
+ if (!flagsLiteral.empty()) {
+ result.appendFormat(" (%s)", flagsLiteral.c_str());
+ }
+ result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 48bfd79..cfc0985 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -78,10 +78,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
};
-template<>
-const size_t DeviceConverter::mSize = sizeof(DeviceConverter::mTable) /
- sizeof(DeviceConverter::mTable[0]);
-
template <>
const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
@@ -96,9 +92,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
};
-template<>
-const size_t OutputFlagConverter::mSize = sizeof(OutputFlagConverter::mTable) /
- sizeof(OutputFlagConverter::mTable[0]);
template <>
@@ -108,9 +101,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
};
-template<>
-const size_t InputFlagConverter::mSize = sizeof(InputFlagConverter::mTable) /
- sizeof(InputFlagConverter::mTable[0]);
template <>
@@ -144,9 +134,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
};
-template<>
-const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
- sizeof(FormatConverter::mTable[0]);
template <>
@@ -157,9 +144,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
};
-template<>
-const size_t OutputChannelConverter::mSize = sizeof(OutputChannelConverter::mTable) /
- sizeof(OutputChannelConverter::mTable[0]);
template <>
@@ -168,9 +152,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
};
-template<>
-const size_t InputChannelConverter::mSize = sizeof(InputChannelConverter::mTable) /
- sizeof(InputChannelConverter::mTable[0]);
template <>
const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
@@ -183,9 +164,6 @@
{"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
{"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
};
-template<>
-const size_t ChannelIndexConverter::mSize = sizeof(ChannelIndexConverter::mTable) /
- sizeof(ChannelIndexConverter::mTable[0]);
template <>
@@ -195,9 +173,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
};
-template<>
-const size_t GainModeConverter::mSize = sizeof(GainModeConverter::mTable) /
- sizeof(GainModeConverter::mTable[0]);
template <>
const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
@@ -207,9 +182,6 @@
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
};
-template<>
-const size_t DeviceCategoryConverter::mSize = sizeof(DeviceCategoryConverter::mTable) /
- sizeof(DeviceCategoryConverter::mTable[0]);
template <>
const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
@@ -228,26 +200,37 @@
MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
};
+
template<>
-const size_t StreamTypeConverter::mSize = sizeof(StreamTypeConverter::mTable) /
- sizeof(StreamTypeConverter::mTable[0]);
+const AudioModeConverter::Table AudioModeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
+};
+
template <class Traits>
bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
{
- for (size_t i = 0; i < mSize; i++) {
+ for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
if (mTable[i].value == value) {
str = mTable[i].literal;
return true;
}
}
+ char result[64];
+ snprintf(result, sizeof(result), "Unknown enum value %d", value);
+ str = result;
return false;
}
template <class Traits>
bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
{
- for (size_t i = 0; i < mSize; i++) {
+ for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
if (strcmp(mTable[i].literal, str.c_str()) == 0) {
ALOGV("stringToEnum() found %s", mTable[i].literal);
result = mTable[i].value;
@@ -288,6 +271,19 @@
return value;
}
+template <class Traits>
+void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
+{
+ bool first_flag = true;
+ for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
+ if ((mask & mTable[i].value) == mTable[i].value) {
+ if (!first_flag) str += del;
+ first_flag = false;
+ str += mTable[i].literal;
+ }
+ }
+}
+
template class TypeConverter<DeviceTraits>;
template class TypeConverter<OutputFlagTraits>;
template class TypeConverter<InputFlagTraits>;
@@ -298,6 +294,7 @@
template class TypeConverter<GainModeTraits>;
template class TypeConverter<StreamTraits>;
template class TypeConverter<DeviceCategoryTraits>;
+template class TypeConverter<AudioModeTraits>;
}; // namespace android
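For reference, a hedged sketch of how the new maskToString() is consumed (the real call sites are the IOProfile::dump() changes earlier in this patch); the flag mask below is illustrative and the snippet is not standalone:

    // sketch: convert a flag mask into a "|"-separated list of literals
    std::string flagsLiteral;
    uint32_t flags = AUDIO_INPUT_FLAG_RAW | AUDIO_INPUT_FLAG_SYNC;   // illustrative mask
    InputFlagConverter::maskToString(flags, flagsLiteral);
    // flagsLiteral is now "AUDIO_INPUT_FLAG_RAW|AUDIO_INPUT_FLAG_SYNC"
    // (ordering follows the converter table, delimiter defaults to "|")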
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 84306c6..114df81 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -441,15 +441,17 @@
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0) {
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
- //FIXME: consider all active sessions
- AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
- audio_session_t activeSession = activeSessions.keyAt(0);
- stopInput(activeInput, activeSession);
- releaseInput(activeInput, activeSession);
+ Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
+ AudioSessionCollection activeSessions =
+ activeDesc->getAudioSessions(true /*activeOnly*/);
+ for (size_t j = 0; j < activeSessions.size(); j++) {
+ audio_session_t activeSession = activeSessions.keyAt(j);
+ stopInput(activeDesc->mIoHandle, activeSession);
+ releaseInput(activeDesc->mIoHandle, activeSession);
+ }
}
}
@@ -610,15 +612,16 @@
}
}
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0) {
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- audio_devices_t newDevice = getNewInputDevice(activeInput);
+ Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device can not be reached via current input
- if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
- setInputDevice(activeInput, newDevice);
+ if (activeDesc->mProfile->getSupportedDevices().types() &
+ (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+ setInputDevice(activeDesc->mIoHandle, newDevice);
} else {
- closeInput(activeInput);
+ closeInput(activeDesc->mIoHandle);
}
}
}
@@ -1385,6 +1388,7 @@
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
+
audio_devices_t device;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
@@ -1528,14 +1532,22 @@
isSoundTrigger,
policyMix, mpClientInterface);
-// TODO enable input reuse
-#if 0
+
// reuse an open input if possible
for (size_t i = 0; i < mInputs.size(); i++) {
sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
- // reuse input if it shares the same profile and same sound trigger attribute
- if (profile == desc->mProfile &&
- isSoundTrigger == desc->isSoundTrigger()) {
+ // reuse input if:
+ // - it shares the same profile
+ // AND
+ // - it is not a reroute submix input
+ // AND
+ // - it is: not used for sound trigger
+ // OR
+ // used for sound trigger and all clients use the same session ID
+ //
+ if ((profile == desc->mProfile) &&
+ (isSoundTrigger == desc->isSoundTrigger()) &&
+ !is_virtual_input_device(device)) {
sp<AudioSession> as = desc->getAudioSession(session);
if (as != 0) {
@@ -1545,16 +1557,33 @@
} else {
ALOGW("getInputForDevice() record with different attributes"
" exists for session %d", session);
- return input;
+ break;
}
+ } else if (isSoundTrigger) {
+ break;
+ }
+ // force close input if current source is now the highest priority request on this input
+ // and current input properties are not exactly as requested.
+ if ((desc->mSamplingRate != samplingRate ||
+ desc->mChannelMask != channelMask ||
+ !audio_formats_match(desc->mFormat, format)) &&
+ (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
+ source_priority(inputSource))) {
+ ALOGV("%s: force closing input %d for higher priority source %d",
+ __FUNCTION__, desc->mIoHandle, inputSource);
+ AudioSessionCollection sessions = desc->getAudioSessions(false /*activeOnly*/);
+ for (size_t j = 0; j < sessions.size(); j++) {
+ audio_session_t currentSession = sessions.keyAt(j);
+ stopInput(desc->mIoHandle, currentSession);
+ releaseInput(desc->mIoHandle, currentSession);
+ }
+ break;
} else {
desc->addAudioSession(session, audioSession);
+ ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
+ return mInputs.keyAt(i);
}
- ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
- return mInputs.keyAt(i);
}
}
-#endif
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = profileSamplingRate;
@@ -1597,10 +1626,50 @@
return input;
}
+bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+ const sp<AudioSession>& audioSession)
+{
+ // Do not allow capture if an active voice call is using a software patch and
+ // the call TX source device is on the same HW module.
+ // FIXME: would be better to refine to only inputs whose profile connects to the
+ // call TX device but this information is not in the audio patch
+ if (mCallTxPatch != 0 &&
+ inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+ return false;
+ }
+
+ // starting concurrent capture is allowed if:
+ // 1) capturing for re-routing
+ // 2) capturing for HOTWORD source
+ // 3) capturing for FM TUNER source
+ // 4) all other active captures are either for re-routing, HOTWORD or FM TUNER
+
+ if (is_virtual_input_device(inputDesc->mDevice) ||
+ audioSession->inputSource() == AUDIO_SOURCE_HOTWORD ||
+ audioSession->inputSource() == AUDIO_SOURCE_FM_TUNER) {
+ return true;
+ }
+
+ Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeInput = activeInputs[i];
+ if ((activeInput->inputSource() != AUDIO_SOURCE_HOTWORD) &&
+ (activeInput->inputSource() != AUDIO_SOURCE_FM_TUNER) &&
+ !is_virtual_input_device(activeInput->mDevice)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
- audio_session_t session)
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency)
{
ALOGV("startInput() input %d", input);
+ *concurrency = API_INPUT_CONCURRENCY_NONE;
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("startInput() unknown input %d", input);
@@ -1614,81 +1683,62 @@
return BAD_VALUE;
}
- // virtual input devices are compatible with other input devices
- if (!is_virtual_input_device(inputDesc->mDevice)) {
-
- // for a non-virtual input device, check if there is another (non-virtual) active input
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0 && activeInput != input) {
-
- // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
- // otherwise the active input continues and the new input cannot be started.
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
- !activeDesc->hasPreemptedSession(session)) {
- ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
- //FIXME: consider all active sessions
- AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
- audio_session_t activeSession = activeSessions.keyAt(0);
- SortedVector<audio_session_t> sessions =
- activeDesc->getPreemptedSessions();
- sessions.add(activeSession);
- inputDesc->setPreemptedSessions(sessions);
- stopInput(activeInput, activeSession);
- releaseInput(activeInput, activeSession);
- } else {
- ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
- return INVALID_OPERATION;
- }
- }
-
- // Do not allow capture if an active voice call is using a software patch and
- // the call TX source device is on the same HW module.
- // FIXME: would be better to refine to only inputs whose profile connects to the
- // call TX device but this information is not in the audio patch
- if (mCallTxPatch != 0 &&
- inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
- return INVALID_OPERATION;
- }
+ if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
+ ALOGW("startInput(%d) failed: other input already started", input);
+ return INVALID_OPERATION;
}
+ if (isInCall()) {
+ *concurrency |= API_INPUT_CONCURRENCY_CALL;
+ }
+ if (mInputs.activeInputsCount() != 0) {
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
+ }
+
+ // increment activity count before calling getNewInputDevice() below as only active sessions
+ // are considered for device selection
+ audioSession->changeActiveCount(1);
+
// Routing?
mInputRoutes.incRouteActivity(session);
- if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
- MIX_STATE_MIXING);
- }
+ if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
- if (mInputs.activeInputsCount() == 0) {
- SoundTrigger::setCaptureState(true);
- }
- setInputDevice(input, getNewInputDevice(input), true /* force */);
+ setInputDevice(input, getNewInputDevice(inputDesc), true /* force */);
- // automatically enable the remote submix output when input is started if not
- // used by a policy mix of type MIX_TYPE_RECORDERS
- // For remote submix (a virtual device), we open only one input per capture request.
- if (audio_is_remote_submix_device(inputDesc->mDevice)) {
- String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
- address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+ // if input maps to a dynamic policy with an activity listener, notify of state change
+ if ((inputDesc->mPolicyMix != NULL)
+ && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ MIX_STATE_MIXING);
}
- if (address != "") {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address, "remote-submix");
+
+ if (mInputs.activeInputsCount() == 0) {
+ SoundTrigger::setCaptureState(true);
+ }
+
+ // automatically enable the remote submix output when input is started if not
+ // used by a policy mix of type MIX_TYPE_RECORDERS
+ // For remote submix (a virtual device), we open only one input per capture request.
+ if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+ String8 address = String8("");
+ if (inputDesc->mPolicyMix == NULL) {
+ address = String8("0");
+ } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = inputDesc->mPolicyMix->mDeviceAddress;
+ }
+ if (address != "") {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address, "remote-submix");
+ }
}
}
}
ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
- audioSession->changeActiveCount(1);
return NO_ERROR;
}
@@ -1719,36 +1769,41 @@
// Routing?
mInputRoutes.decRouteActivity(session);
- if (!inputDesc->isActive()) {
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
- MIX_STATE_IDLE);
- }
+ if (audioSession->activeCount() == 0) {
- // automatically disable the remote submix output when input is stopped if not
- // used by a policy mix of type MIX_TYPE_RECORDERS
- if (audio_is_remote_submix_device(inputDesc->mDevice)) {
- String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
- address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ if (inputDesc->isActive()) {
+ setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
+ } else {
+ // if input maps to a dynamic policy with an activity listener, notify of state change
+ if ((inputDesc->mPolicyMix != NULL)
+ && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ MIX_STATE_IDLE);
}
- if (address != "") {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address, "remote-submix");
+
+ // automatically disable the remote submix output when input is stopped if not
+ // used by a policy mix of type MIX_TYPE_RECORDERS
+ if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+ String8 address = String8("");
+ if (inputDesc->mPolicyMix == NULL) {
+ address = String8("0");
+ } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = inputDesc->mPolicyMix->mDeviceAddress;
+ }
+ if (address != "") {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address, "remote-submix");
+ }
}
- }
- resetInputDevice(input);
+ resetInputDevice(input);
- if (mInputs.activeInputsCount() == 0) {
- SoundTrigger::setCaptureState(false);
+ if (mInputs.activeInputsCount() == 0) {
+ SoundTrigger::setCaptureState(false);
+ }
+ inputDesc->clearPreemptedSessions();
}
- inputDesc->clearPreemptedSessions();
}
return NO_ERROR;
}
@@ -2224,7 +2279,9 @@
snprintf(buffer, SIZE, " Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
result.append(buffer);
- snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
+ std::string stateLiteral;
+ AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
+ snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
result.append(buffer);
snprintf(buffer, SIZE, " Force use for communications %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
@@ -2613,7 +2670,7 @@
// create a software bridge in PatchPanel if:
// - source and sink devices are on different HW modules OR
// - audio HAL version is < 3.0
- if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
+ if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
(srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
// support only one sink device for now to simplify output selection logic
if (patch->num_sinks > 1) {
@@ -2713,7 +2770,7 @@
return BAD_VALUE;
}
setInputDevice(inputDesc->mIoHandle,
- getNewInputDevice(inputDesc->mIoHandle),
+ getNewInputDevice(inputDesc),
true,
NULL);
} else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
@@ -4340,9 +4397,9 @@
return device;
}
-audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
+audio_devices_t AudioPolicyManager::getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc)
{
- sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+ audio_devices_t device = AUDIO_DEVICE_NONE;
ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
@@ -4354,7 +4411,12 @@
}
}
- audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->inputSource());
+ audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+ if (isInCall()) {
+ device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+ } else if (source != AUDIO_SOURCE_DEFAULT) {
+ device = getDeviceAndMixForInputSource(source);
+ }
return device;
}
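To make the new behavior concrete, a hedged worked example (the handles and sessions below are hypothetical, and the device is assumed to expose a single built-in mic profile): a CAMCORDER capture started while a HOTWORD capture is active is now allowed and reported as concurrent, whereas a MIC capture started while a CAMCORDER capture is active is still refused by isConcurentCaptureAllowed().

    // pseudo-assertions, not a real test; apm, *Input and *Session names are hypothetical
    AudioPolicyInterface::concurrency_type__mask_t concurrency;

    // HOTWORD capture already active on the built-in mic:
    status_t s1 = apm->startInput(camcorderInput, camcorderSession, &concurrency);
    // s1 == NO_ERROR, concurrency has API_INPUT_CONCURRENCY_CAPTURE set

    // CAMCORDER capture now active:
    status_t s2 = apm->startInput(micInput, micSession, &concurrency);
    // s2 == INVALID_OPERATION: MIC is not in the concurrent capture allowlist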
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 1ac89dd..4d89278 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -139,7 +139,8 @@
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session);
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency);
// indicates to the audio policy manager that the input stops being used.
virtual status_t stopInput(audio_io_handle_t input,
@@ -405,7 +406,7 @@
void updateDevicesAndOutputs();
// selects the most appropriate device on input for current state
- audio_devices_t getNewInputDevice(audio_io_handle_t input);
+ audio_devices_t getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
virtual uint32_t getMaxEffectsCpuLoad()
{
@@ -506,6 +507,8 @@
void clearAudioSources(uid_t uid);
+ bool isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+ const sp<AudioSession>& audioSession);
static bool streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index b732b20..9a28137 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -57,11 +57,11 @@
}
mInputSources.clear();
- for (i = 0; i < mInputs.size(); i++) {
- mInputs.valueAt(i)->mEffects.clear();
- delete mInputs.valueAt(i);
+ for (i = 0; i < mInputSessions.size(); i++) {
+ mInputSessions.valueAt(i)->mEffects.clear();
+ delete mInputSessions.valueAt(i);
}
- mInputs.clear();
+ mInputSessions.clear();
// release audio output processing resources
for (i = 0; i < mOutputStreams.size(); i++) {
@@ -93,19 +93,19 @@
ALOGV("addInputEffects(): no processing needs to be attached to this source");
return status;
}
- ssize_t idx = mInputs.indexOfKey(input);
- EffectVector *inputDesc;
+ ssize_t idx = mInputSessions.indexOfKey(audioSession);
+ EffectVector *sessionDesc;
if (idx < 0) {
- inputDesc = new EffectVector(audioSession);
- mInputs.add(input, inputDesc);
+ sessionDesc = new EffectVector(audioSession);
+ mInputSessions.add(audioSession, sessionDesc);
} else {
// EffectVector is existing and we just need to increase ref count
- inputDesc = mInputs.valueAt(idx);
+ sessionDesc = mInputSessions.valueAt(idx);
}
- inputDesc->mRefCount++;
+ sessionDesc->mRefCount++;
- ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
- if (inputDesc->mRefCount == 1) {
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+ if (sessionDesc->mRefCount == 1) {
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
EffectDesc *effect = effects[i];
@@ -123,30 +123,31 @@
}
ALOGV("addInputEffects(): added Fx %s on source: %d",
effect->mName, (int32_t)aliasSource);
- inputDesc->mEffects.add(fx);
+ sessionDesc->mEffects.add(fx);
}
- inputDesc->setProcessorEnabled(true);
+ sessionDesc->setProcessorEnabled(true);
}
return status;
}
-status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input)
+status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input,
+ audio_session_t audioSession)
{
status_t status = NO_ERROR;
Mutex::Autolock _l(mLock);
- ssize_t index = mInputs.indexOfKey(input);
+ ssize_t index = mInputSessions.indexOfKey(audioSession);
if (index < 0) {
return status;
}
- EffectVector *inputDesc = mInputs.valueAt(index);
- inputDesc->mRefCount--;
- ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
- if (inputDesc->mRefCount == 0) {
- inputDesc->setProcessorEnabled(false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
+ EffectVector *sessionDesc = mInputSessions.valueAt(index);
+ sessionDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+ if (sessionDesc->mRefCount == 0) {
+ sessionDesc->setProcessorEnabled(false);
+ delete sessionDesc;
+ mInputSessions.removeItemsAt(index);
ALOGV("releaseInputEffects(): all effects released");
}
return status;
@@ -160,16 +161,16 @@
Mutex::Autolock _l(mLock);
size_t index;
- for (index = 0; index < mInputs.size(); index++) {
- if (mInputs.valueAt(index)->mSessionId == audioSession) {
+ for (index = 0; index < mInputSessions.size(); index++) {
+ if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
break;
}
}
- if (index == mInputs.size()) {
+ if (index == mInputSessions.size()) {
*count = 0;
return BAD_VALUE;
}
- Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+ Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
effect_descriptor_t desc = effects[i]->descriptor();
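A standalone sketch of the bookkeeping change above: automatic input effects are now reference-counted per audio_session_t instead of per audio_io_handle_t, so two records that end up sharing one input (now possible with input reuse) keep independent effect chains (illustrative types, not AOSP API):

    #include <map>

    struct EffectChain { int refCount = 0; };             // illustrative stand-in
    static std::map<int /*session*/, EffectChain> gInputSessions;

    static void addInputEffects(int session) {
        EffectChain& chain = gInputSessions[session];      // keyed by session, not by input
        if (++chain.refCount == 1) {
            // create and enable the source-specific effects here
        }
    }

    static void releaseInputEffects(int session) {
        auto it = gInputSessions.find(session);
        if (it == gInputSessions.end()) return;
        if (--it->second.refCount == 0) {
            // disable and destroy the effects, then drop the entry
            gInputSessions.erase(it);
        }
    }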
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index ee9bd50..f68966f 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -62,7 +62,8 @@
audio_session_t audioSession);
// Add all input effects associated to this input
- status_t releaseInputEffects(audio_io_handle_t input);
+ status_t releaseInputEffects(audio_io_handle_t input,
+ audio_session_t audioSession);
// Return a list of effect descriptors for default output effects
@@ -178,12 +179,12 @@
size_t *curSize,
size_t *totSize);
- // protects access to mInputSources, mInputs, mOutputStreams, mOutputSessions
+ // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
Mutex mLock;
// Automatic input effects are configured per audio_source_t
KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
// Automatic input effects are unique for audio_io_handle_t
- KeyedVector< audio_io_handle_t, EffectVector* > mInputs;
+ KeyedVector< audio_session_t, EffectVector* > mInputSessions;
// Automatic output effects are organized per audio_stream_type_t
KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index c9b3abc..dc45a54 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -361,8 +361,23 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+ AudioPolicyInterface::concurrency_type__mask_t concurrency;
+ status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
- return mAudioPolicyManager->startInput(input, session);
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
+ "startInput(): invalid concurrency type %d", (int)concurrency);
+
+ // enforce permission (if any) required for each type of concurrency
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
+ //TODO: check incall capture permission
+ }
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
+ //TODO: check concurrent capture permission
+ }
+ }
+
+ return status;
}
status_t AudioPolicyService::stopInput(audio_io_handle_t input,
@@ -390,7 +405,7 @@
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input);
+ status_t status = audioPolicyEffects->releaseInputEffects(input, session);
if(status != NO_ERROR) {
ALOGW("Failed to release effects on input %d", input);
}
@@ -566,7 +581,8 @@
*count = 0;
return NO_INIT;
}
- return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
+ return audioPolicyEffects->queryDefaultInputEffects(
+ (audio_session_t)audioSession, descriptors, count);
}
bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 7c9315d..612076b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -307,7 +307,7 @@
}
void AudioPolicyService::releaseInput(audio_io_handle_t input,
- audio_session_t session __unused)
+ audio_session_t session)
{
if (mpAudioPolicy == NULL) {
return;
@@ -321,7 +321,7 @@
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input);
+ status_t status = audioPolicyEffects->releaseInputEffects(input, session);
if(status != NO_ERROR) {
ALOGW("Failed to release effects on input %d", input);
}
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index a9a2d3c..4e337a0 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -5,19 +5,20 @@
LOCAL_SRC_FILES := MediaExtractorService.cpp
LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
LOCAL_MODULE:= libmediaextractorservice
-LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
# service executable
include $(CLEAR_VARS)
+# seccomp filters are defined for the following architectures:
LOCAL_REQUIRED_MODULES_arm := mediaextractor-seccomp.policy
+LOCAL_REQUIRED_MODULES_arm64 := mediaextractor-seccomp.policy
LOCAL_REQUIRED_MODULES_x86 := mediaextractor-seccomp.policy
+# TODO add seccomp filter for x86_64.
LOCAL_SRC_FILES := main_extractorservice.cpp minijail/minijail.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils liblog libicuuc libminijail
LOCAL_STATIC_LIBRARIES := libicuandroid_utils
LOCAL_MODULE:= mediaextractor
-LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := mediaextractor.rc
LOCAL_C_INCLUDES := frameworks/av/media/libmedia
include $(BUILD_EXECUTABLE)
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
index 79c5505..3a93340 100644
--- a/services/mediaextractor/minijail/Android.mk
+++ b/services/mediaextractor/minijail/Android.mk
@@ -1,18 +1,12 @@
LOCAL_PATH := $(call my-dir)
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
+# TODO add filter for x86_64
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86))
include $(CLEAR_VARS)
LOCAL_MODULE := mediaextractor-seccomp.policy
LOCAL_MODULE_CLASS := ETC
LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-
-# mediaextractor runs in 32-bit combatibility mode. For 64 bit architectures,
-# use the 32 bit policy
-ifdef TARGET_2ND_ARCH
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_2ND_ARCH).policy
-else
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
-endif
+LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
# allow device specific additions to the syscall whitelist
ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy))
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
new file mode 100644
index 0000000..ae6ac05
--- /dev/null
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
@@ -0,0 +1,36 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+mmap: 1
+munmap: 1
+openat: 1
+mprotect: 1
+madvise: 1
+getuid: 1
+fstat: 1
+read: 1
+setpriority: 1
+sigaltstack: 1
+clone: 1
+lseek: 1
+newfstatat: 1
+faccessat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+getrlimit: 1
+
+# for attaching to debuggerd on process crash
+rt_sigaction: 1
+# socket: arg0 == AF_LOCAL
+socket: arg0 == 1
+connect: 1
+rt_tgsigqueueinfo: 1
+writev: 1