Merge "Bound sizes for tx3g atom parsing per track"
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index acf6999..99b613e 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5695,6 +5695,8 @@
* <p>In both cases, all images generated for a particular capture request still carry the same
* timestamps, so that they can be used to look up the matching frame number and
* onCaptureStarted callback.</p>
+ * <p>This tag is only applicable if the logical camera device supports concurrent physical
+ * streams from different physical cameras.</p>
*/
ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE = // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
@@ -7581,14 +7583,23 @@
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING = 10,
/**
- * <p>The camera device is a logical camera backed by two or more physical cameras. In
- * API level 28, the physical cameras must also be exposed to the application via
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. Starting from API
- * level 29, some or all physical cameras may not be independently exposed to the
- * application, in which case the physical camera IDs will not be available in
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the application
- * can still query the physical cameras' characteristics by calling
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>.</p>
+ * <p>The camera device is a logical camera backed by two or more physical cameras.</p>
+ * <p>In API level 28, the physical cameras must also be exposed to the application via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>.</p>
+ * <p>Starting from API level 29, some or all physical cameras may not be independently
+ * exposed to the application, in which case the physical camera IDs will not be
+ * available in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>. But the
+ * application can still query the physical cameras' characteristics by calling
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>. Additionally,
+ * if a physical camera is hidden from the camera ID list, the mandatory stream
+ * combinations for that physical camera must be supported through the logical
+ * camera using physical streams.</p>
+ * <p>Combinations of logical and physical streams, or of physical streams from
+ * different physical cameras, are not guaranteed. However, if the camera device
+ * supports {@link ACameraDevice_isSessionConfigurationSupported }, the
+ * application can call it to query whether a stream combination involving
+ * physical streams is supported.</p>
* <p>Camera application shouldn't assume that there are at most 1 rear camera and 1 front
* camera in the system. For an application that switches between front and back cameras,
* the recommendation is to switch between the first rear camera and the first front
@@ -7613,24 +7624,6 @@
* the same.</li>
* <li>The logical camera must be LIMITED or higher device.</li>
* </ul>
- * <p>Both the logical camera device and its underlying physical devices support the
- * mandatory stream combinations required for their device levels.</p>
- * <p>Additionally, for each guaranteed stream combination, the logical camera supports:</p>
- * <ul>
- * <li>For each guaranteed stream combination, the logical camera supports replacing one
- * logical {@link AIMAGE_FORMAT_YUV_420_888 YUV_420_888}
- * or raw stream with two physical streams of the same size and format, each from a
- * separate physical camera, given that the size and format are supported by both
- * physical cameras.</li>
- * <li>If the logical camera doesn't advertise RAW capability, but the underlying physical
- * cameras do, the logical camera will support guaranteed stream combinations for RAW
- * capability, except that the RAW streams will be physical streams, each from a separate
- * physical camera. This is usually the case when the physical cameras have different
- * sensor sizes.</li>
- * </ul>
- * <p>Using physical streams in place of a logical stream of the same size and format will
- * not slow down the frame rate of the capture, as long as the minimum frame duration
- * of the physical and logical streams are the same.</p>
* <p>A logical camera device's dynamic metadata may contain
* ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
* active physical camera Id. An active physical camera is the physical camera from which
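
The stream-combination guidance above can be exercised from the NDK. A minimal
sketch, assuming device is an opened logical ACameraDevice on API 29+ and the
windows and physical IDs are supplied by the caller; the helper name is
hypothetical and error handling is elided:

    #include <camera/NdkCameraDevice.h>

    // Returns true if the logical device supports streaming window1 and
    // window2 concurrently from physical cameras id1 and id2.
    static bool canCombinePhysicalStreams(ACameraDevice *device,
                                          ANativeWindow *window1, const char *id1,
                                          ANativeWindow *window2, const char *id2) {
        ACaptureSessionOutputContainer *container = nullptr;
        ACaptureSessionOutputContainer_create(&container);

        ACaptureSessionOutput *out1 = nullptr, *out2 = nullptr;
        ACaptureSessionPhysicalOutput_create(window1, id1, &out1);
        ACaptureSessionPhysicalOutput_create(window2, id2, &out2);
        ACaptureSessionOutputContainer_add(container, out1);
        ACaptureSessionOutputContainer_add(container, out2);

        // ACAMERA_OK means the combination is guaranteed to be configurable.
        camera_status_t status =
                ACameraDevice_isSessionConfigurationSupported(device, container);

        ACaptureSessionOutput_free(out1);
        ACaptureSessionOutput_free(out2);
        ACaptureSessionOutputContainer_free(container);
        return status == ACAMERA_OK;
    }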
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index bffab22..b18c897 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -300,7 +300,6 @@
OutputConfigurationWrapper outConfigW;
OutputConfiguration &outConfig = outConfigW.mOutputConfiguration;
outConfig.rotation = utils::convertToHidl(output->mRotation);
- outConfig.windowGroupId = -1; // ndk doesn't support inter OutputConfiguration buffer sharing.
outConfig.windowHandles.resize(output->mSharedWindows.size() + 1);
outConfig.windowHandles[0] = output->mWindow;
outConfig.physicalCameraId = output->mPhysicalCameraId;
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index 2f1006d..a03c7bc 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -99,7 +99,15 @@
return mOutputConfiguration;
}
- OutputConfigurationWrapper() = default;
+ OutputConfigurationWrapper() {
+ mOutputConfiguration.rotation = OutputConfiguration::Rotation::R0;
+ // The ndk currently doesn't support deferred surfaces
+ mOutputConfiguration.isDeferred = false;
+ mOutputConfiguration.width = 0;
+ mOutputConfiguration.height = 0;
+ // The ndk doesn't support inter-OutputConfiguration buffer sharing.
+ mOutputConfiguration.windowGroupId = -1;
+ }
OutputConfigurationWrapper(OutputConfiguration &outputConfiguration)
: mOutputConfiguration((outputConfiguration)) { }
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 32566ca..5a31c58 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -82,7 +82,7 @@
const Trex *trex,
off64_t firstMoofOffset,
const sp<ItemTable> &itemTable,
- int32_t elstShiftStartTicks);
+ uint64_t elstShiftStartTicks);
virtual status_t init();
virtual media_status_t start();
@@ -148,7 +148,7 @@
// Start offset from composition time to presentation time.
// Support shift only for video tracks through mElstShiftStartTicks for now.
- int32_t mElstShiftStartTicks;
+ uint64_t mElstShiftStartTicks;
size_t parseNALSize(const uint8_t *data) const;
status_t parseChunk(off64_t *offset);
@@ -4059,7 +4059,7 @@
if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
track->elstShiftStartTicks = track->elst_media_time;
- ALOGV("video track->elstShiftStartTicks :%" PRId64, track->elst_media_time);
+ ALOGV("video track->elstShiftStartTicks :%" PRIu64, track->elstShiftStartTicks);
}
MPEG4Source *source = new MPEG4Source(
@@ -4469,7 +4469,7 @@
const Trex *trex,
off64_t firstMoofOffset,
const sp<ItemTable> &itemTable,
- int32_t elstShiftStartTicks)
+ uint64_t elstShiftStartTicks)
: mFormat(format),
mDataSource(dataSource),
mTimescale(timeScale),
@@ -4595,7 +4595,7 @@
// Start offset should be less or equal to composition time of first sample.
// ISO : sample_composition_time_offset, version 0 (unsigned) for major brands.
mElstShiftStartTicks = std::min(mElstShiftStartTicks,
- (*mCurrentSamples.begin()).compositionOffset);
+ (uint64_t)(*mCurrentSamples.begin()).compositionOffset);
}
return err;
}
@@ -4605,7 +4605,7 @@
err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
// Start offset should be less or equal to composition time of first sample.
// Composition time stamp of first sample cannot be negative.
- mElstShiftStartTicks = std::min(mElstShiftStartTicks, (int32_t)firstSampleCTS);
+ mElstShiftStartTicks = std::min(mElstShiftStartTicks, firstSampleCTS);
}
return err;
@@ -5258,8 +5258,30 @@
sampleCtsOffset = 0;
}
- if (size < (off64_t)sampleCount * bytesPerSample) {
- return -EINVAL;
+ if (bytesPerSample != 0) {
+ if (size < (off64_t)sampleCount * bytesPerSample) {
+ return -EINVAL;
+ }
+ } else {
+ if (sampleDuration == 0) {
+ ALOGW("b/123389881 sampleDuration == 0");
+ android_errorWriteLog(0x534e4554, "124389881 zero");
+ return -EINVAL;
+ }
+
+ // apply some sanity (vs strict legality) checks
+ //
+ // clamp the count of entries in the trun box, to avoid spending forever parsing
+ // this box. Clamping (vs error) lets us play *something*.
+ // 1 million is about 400 msecs on a Pixel3, should be no more than a couple seconds
+ // on the slowest devices.
+ static constexpr uint32_t kMaxTrunSampleCount = 1000000;
+ if (sampleCount > kMaxTrunSampleCount) {
+ ALOGW("b/123389881 clamp sampleCount(%u) @ kMaxTrunSampleCount(%u)",
+ sampleCount, kMaxTrunSampleCount);
+ android_errorWriteLog(0x534e4554, "124389881 count");
+ sampleCount = kMaxTrunSampleCount;
+ }
}
Sample tmp;
@@ -5515,7 +5537,11 @@
err = mSampleTable->getMetaDataForSample(
mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
if(err == OK) {
- cts -= mElstShiftStartTicks;
/* The composition timestamp cannot be negative. Some files have a video
 * sample time (STTS) delta of zero (b/117402420). Hence subtract only
 * min(cts, mElstShiftStartTicks), so that audio tracks can still be played.
 */
+ cts -= std::min(cts, mElstShiftStartTicks);
}
} else {
@@ -5799,8 +5825,8 @@
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
- ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRId32, seekTimeUs,
- mElstShiftStartTicks);
+ ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRIu64, seekTimeUs,
+ mElstShiftStartTicks);
int numSidxEntries = mSegments.size();
if (numSidxEntries != 0) {
@@ -5856,7 +5882,7 @@
off64_t offset = 0;
size_t size = 0;
- uint32_t cts = 0;
+ uint64_t cts = 0;
bool isSyncSample = false;
bool newBuffer = false;
if (mBuffer == NULL || mCurrentSampleIndex >= mCurrentSamples.size()) {
@@ -5888,7 +5914,11 @@
offset = smpl->offset;
size = smpl->size;
cts = mCurrentTime + smpl->compositionOffset;
- cts -= mElstShiftStartTicks;
/* The composition timestamp cannot be negative. Some files have a video
 * sample time (STTS) delta of zero (b/117402420). Hence subtract only
 * min(cts, mElstShiftStartTicks), so that audio tracks can still be played.
 */
+ cts -= std::min(cts, mElstShiftStartTicks);
mCurrentTime += smpl->duration;
isSyncSample = (mCurrentSampleIndex == 0);
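
A note on the std::min() subtraction above: now that cts and
mElstShiftStartTicks are unsigned 64-bit values, a plain
"cts -= mElstShiftStartTicks" would wrap around to a huge positive value
whenever the shift exceeds the timestamp; clamping the subtrahend floors the
result at zero instead. A self-contained illustration:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t cts = 100, shift = 250;
        // Naive "cts -= shift" would wrap to 2^64 - 150.
        cts -= std::min(cts, shift);   // subtract at most cts
        assert(cts == 0);              // floored at zero, not wrapped
        return 0;
    }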
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 83fc4fe..031e793 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -83,9 +83,13 @@
bool includes_expensive_metadata;
bool skipTrack;
bool has_elst;
+ /* Signed int; the ISO spec allows media_time = -1 for other use cases,
+ * but we don't support empty edits for now.
+ */
int64_t elst_media_time;
uint64_t elst_segment_duration;
- int32_t elstShiftStartTicks;
+ // unsigned int, shift start offset only when media_time > 0.
+ uint64_t elstShiftStartTicks;
bool subsample_encryption;
uint8_t *mTx3gBuffer;
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index cb243a0..958bb2e 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -196,3 +196,10 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_full_queue",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_full_queue.cpp"],
+ shared_libs: ["libaaudio"],
+}
diff --git a/media/libaaudio/tests/test_full_queue.cpp b/media/libaaudio/tests/test_full_queue.cpp
new file mode 100644
index 0000000..12d4fa3
--- /dev/null
+++ b/media/libaaudio/tests/test_full_queue.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test whether a stream dies if it is written to after a delay.
+// Maybe because the message queue from the AAudio service fills up.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int64_t kTimeoutNanos = kNanosPerSecond / 2;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+static void checkFullQueue(aaudio_performance_mode_t perfMode,
+ int32_t sleepMillis) {
+ std::unique_ptr<float[]> buffer = std::make_unique<float[]>(
+ kNumFrames * kChannelCount);
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ AAudioStreamBuilder_setChannelCount(aaudioBuilder, kChannelCount);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ AAudioStream *aaudioStream = nullptr;
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder,
+ &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ // Sleep for a while. This might kill the stream.
+ usleep(sleepMillis * 1000); // convert milliseconds to microseconds
+
+ for (int i = 0; i < 10; i++) {
+ const aaudio_result_t result = AAudioStream_write(aaudioStream,
+ buffer.get(),
+ kNumFrames,
+ kTimeoutNanos);
+ EXPECT_EQ(kNumFrames, result);
+ if (kNumFrames != result) break;
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_50) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_200) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_perf_none_1000) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_NONE, 1000 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_50) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 50 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_200) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 200 /* sleepMillis */);
+}
+
+TEST(test_full_queue, aaudio_full_queue_low_latency_1000) {
+ checkFullQueue(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, 1000 /* sleepMillis */);
+}
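
With the cc_test module added above, the suite should be runnable through the
platform test harness from a full checkout; the exact output path depends on
the lunch target and architecture:

    atest test_full_queue
    # or build and run the binary directly (path may vary):
    m test_full_queue
    adb push $OUT/data/nativetest64/test_full_queue/test_full_queue /data/local/tmp/
    adb shell /data/local/tmp/test_full_queue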
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 6c8e6a4..c08dddb 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -49,7 +49,7 @@
// Instrument audio signal power logging.
// Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
// Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
Return<void> ret = mStream->getAudioProperties(
[&](auto sr, auto m, auto f) {
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 7d5ce05..4818fd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -33,7 +33,7 @@
mStream(stream) {
// Instrument audio signal power logging.
// Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
mStreamPowerLog.init(mStream->get_sample_rate(mStream),
mStream->get_channels(mStream),
mStream->get_format(mStream));
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 4dece96..50826c5 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -368,13 +368,13 @@
ALOGV("Use shared memory: %zu", length);
transferBuf = buf;
} else {
- ALOGD("Large buffer %zu without IMemory!", length);
+ ALOGV("Large buffer %zu without IMemory!", length);
ret = mGroup->acquire_buffer(
(MediaBufferBase **)&transferBuf, false /* nonBlocking */, length);
if (ret != OK
|| transferBuf == nullptr
|| transferBuf->mMemory == nullptr) {
- ALOGW("Failed to acquire shared memory, size %zu, ret %d",
+ ALOGV("Failed to acquire shared memory, size %zu, ret %d",
length, ret);
if (transferBuf != nullptr) {
transferBuf->release();
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 5ab6e37..381df24 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -124,7 +124,8 @@
return false;
}
- static const size_t kBinderMediaBuffers = 4; // buffers managed by BnMediaSource
+ // align buffer count with video request size in NuMediaExtractor::selectTrack()
+ static const size_t kBinderMediaBuffers = 8; // buffers managed by BnMediaSource
static const size_t kTransferSharedAsSharedThreshold = 4 * 1024; // if >= shared, else inline
static const size_t kTransferInlineAsSharedThreshold = 8 * 1024; // if >= shared, else inline
static const size_t kInlineMaxTransfer = 64 * 1024; // Binder size limited to BINDER_VM_SIZE.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 0156ad2..a2cc13e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -328,6 +328,11 @@
const size_t *userData = (size_t *)mpegUserData->data();
for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
+ if (accessUnit->size() < userData[i]) {
+ ALOGW("b/129068792, skip invalid offset for user data");
+ android_errorWriteLog(0x534e4554, "129068792");
+ continue;
+ }
trackAdded |= parseMPEGUserDataUnit(
timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
}
@@ -337,6 +342,12 @@
// returns true if a new CC track is found
bool NuPlayer::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+ if (size < 9) {
+ ALOGW("b/129068792, MPEG user data size too small %zu", size);
+ android_errorWriteLog(0x534e4554, "129068792");
+ return false;
+ }
+
ABitReader br(data + 4, 5);
uint32_t user_identifier = br.getBits(32);
@@ -389,8 +400,14 @@
mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
br.skipBits(16);
} else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
- memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
- mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ if (mDTVCCPacket->capacity() - mDTVCCPacket->size() >= 2) {
+ memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+ mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ } else {
+ ALOGW("b/129068792, skip CC due to too much data(%zu, %zu)",
+ mDTVCCPacket->capacity(), mDTVCCPacket->size());
+ android_errorWriteLog(0x534e4554, "129068792");
+ }
br.skipBits(16);
} else if (cc_type == 0 || cc_type == 1) {
uint8_t cc_data_1 = br.getBits(8) & 0x7f;
@@ -477,6 +494,11 @@
size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
if (mSelectedTrack == (ssize_t)trackIndex) {
sp<ABuffer> ccPacket = new ABuffer(block_size);
+ if (ccPacket->capacity() == 0) {
+ ALOGW("b/129068792, no memory available, %zu", block_size);
+ android_errorWriteLog(0x534e4554, "129068792");
+ return false;
+ }
memcpy(ccPacket->data(), br.data(), block_size);
mCCMap.add(timeUs, ccPacket);
}
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 3dc345f..b975b38 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
#include <media/stagefright/DataURISource.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -39,15 +38,27 @@
AString tmp(&uri[5], commaPos - &uri[5]);
if (tmp.endsWith(";base64")) {
- AString encoded(commaPos + 1);
- // Strip CR and LF...
- for (size_t i = encoded.size(); i > 0;) {
- i--;
- if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
- encoded.erase(i, 1);
+ // strip all CR and LF characters.
+ const char *src = commaPos+1;
+ int len = strlen(src) + 1;
+ char *cleansed = (char *) malloc(len);
+ if (cleansed == NULL) return NULL;
+ char *keeping = cleansed;
+ int left = len;
+ for (int i = 0; i < len; i++) {
+ const char c = *src++;
+ if (c == '\r' || c == '\n') {
+ continue;
}
+ *keeping++ = c;
+ left--;
}
+ memset(keeping, 0, left);
+
+ AString encoded(cleansed);
+ free(cleansed);
buffer = decodeBase64(encoded);
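
For reference, the input handled by the rewritten loop looks like the sketch
below (the payload is illustrative). The old code erased each CR/LF from the
AString in place, shifting the tail on every erase (roughly quadratic on
heavily line-wrapped payloads); the rewrite copies the kept characters into
cleansed in a single pass:

    // Base64 data URI whose payload contains CR/LF line breaks.
    const char *uri =
            "data:audio/x-wav;base64,"
            "UklGRiQAAABXQVZF\r\n"
            "Zm10IBAAAAABAAEA";
    sp<DataSource> source = DataURISource::Create(uri);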
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index d4e4000..b6b7784 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1052,8 +1052,9 @@
}
// Prevent possible integer overflow in downstream code.
- if ((uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
- ALOGE("buffer size is too big, width=%d, height=%d", mVideoWidth, mVideoHeight);
+ if (mVideoWidth < 0 || mVideoHeight < 0 ||
+ (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
+ ALOGE("Invalid size(s), width=%d, height=%d", mVideoWidth, mVideoHeight);
return BAD_VALUE;
}
}
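
The INT32_MAX / 4 bound above suggests downstream allocations of up to four
bytes per pixel, whose byte count must still fit in a signed 32-bit int. A
worked example of a rejected size, assuming that rationale:

    #include <cstdint>

    // 32768 * 16384 = 2^29 pixels; at 4 bytes/pixel that is 2^31 bytes,
    // which no longer fits in int32_t, so such a size is rejected.
    static_assert((uint64_t)32768 * 16384 > (uint64_t)INT32_MAX / 4,
                  "this resolution trips the overflow guard");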
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index c3d85ee..50e454c 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -831,7 +831,9 @@
}
void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
- if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
+ if (mStopping || mOutput.lock()->mEncoderReachedEOS) {
+ // Nothing to do
+ } else if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
sp<AMessage> params = new AMessage;
params->setInt32(PARAMETER_KEY_SUSPEND, true);
params->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
index 632088a..8899adc 100644
--- a/media/libstagefright/data/media_codecs_google_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -58,7 +58,7 @@
</MediaCodec>
<MediaCodec name="OMX.google.raw.decoder" type="audio/raw">
<Limit name="channel-count" max="8" />
- <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="sample-rate" ranges="8000-192000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
<MediaCodec name="OMX.google.flac.decoder" type="audio/flac">
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 47a9715..be2404d 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -67,7 +67,7 @@
<MediaCodec name="c2.android.raw.decoder" type="audio/raw">
<Alias name="OMX.google.raw.decoder" />
<Limit name="channel-count" max="8" />
- <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="sample-rate" ranges="8000-192000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
<MediaCodec name="c2.android.flac.decoder" type="audio/flac">
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index ec7ff57..96a8337 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -398,6 +398,13 @@
if (!moduleDevices.contains(device)) {
continue;
}
+
+ // removal of remote submix devices associated with a dynamic policy is
+ // handled by removeOutputProfile() and removeInputProfile()
+ if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+ continue;
+ }
+
device->detach();
// Only remove from dynamic list, not from declared list!!!
if (!hwModule->getDynamicDevices().contains(device)) {
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index b16b5dc..880a3d7 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -343,6 +343,20 @@
return writeUpMessageQueue(&command);
}
+bool AAudioServiceStreamBase::isUpMessageQueueBusy() {
+ std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
+ if (mUpMessageQueue == nullptr) {
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
+ return true;
+ }
+ int32_t framesAvailable = mUpMessageQueue->getFifoBuffer()
+ ->getFullFramesAvailable();
+ int32_t capacity = mUpMessageQueue->getFifoBuffer()
+ ->getBufferCapacityInFrames();
+ // Consider the queue busy once it is half full or more.
+ return framesAvailable >= (capacity / 2);
+}
+
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue == nullptr) {
@@ -366,6 +380,13 @@
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
+ // It is not worth filling up the queue with timestamps.
+ // That can cause the stream to get suspended.
+ // So just drop the timestamp if the queue is getting full.
+ if (isUpMessageQueueBusy()) {
+ return AAUDIO_OK;
+ }
+
// Send a timestamp for the clock model.
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
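
The check added above is a small backpressure rule: timestamps are periodic
and can be re-derived, so they are dropped once the one-way message FIFO
crosses half capacity, leaving headroom for mandatory messages. The same
predicate in isolation (the helper name is hypothetical; FifoBuffer and its
accessors are the ones used in the hunk above):

    // Drop optional, re-derivable messages once the FIFO is half full or more.
    static bool shouldDropOptionalMessage(android::FifoBuffer *fifo) {
        return fifo->getFullFramesAvailable() >= fifo->getBufferCapacityInFrames() / 2;
    }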
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ffc768b..097bc64 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -312,6 +312,12 @@
android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
private:
+
+ /**
+ * @return true if the queue is getting full.
+ */
+ bool isUpMessageQueueBusy();
+
aaudio_handle_t mHandle = -1;
bool mFlowing = false;