Merge "AudioPolicyService: Acquire mutex for SoundTriggerSession" into mnc-dev am: e472e51100 am: 0a8a0c4474 am: ae465be454 am: b995c31415 am: d08327d1a2
am: 74f84d987b
Change-Id: Ie2c1acfdc523c6774dd2f90e0b42a9f274731d57
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index fb43708..0d689a6 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -37,7 +37,7 @@
mMetadata.clear();
mSurfaceList.clear();
- status_t err;
+ status_t err = OK;
if ((err = mMetadata.readFromParcel(parcel)) != OK) {
ALOGE("%s: Failed to read metadata from parcel", __FUNCTION__);
@@ -65,19 +65,16 @@
}
// Surface.writeToParcel
- const char16_t* name = parcel->readString16Inplace(&len);
- ALOGV("%s: Read surface name = %s", __FUNCTION__,
- name != NULL ? String8(name).string() : "<null>");
- sp<IBinder> binder(parcel->readStrongBinder());
- ALOGV("%s: Read surface binder = %p",
- __FUNCTION__, binder.get());
+ view::Surface surfaceShim;
+ if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read output target Surface %d from parcel: %s (%d)",
+ __FUNCTION__, i, strerror(-err), err);
+ return err;
+ }
sp<Surface> surface;
-
- if (binder != NULL) {
- sp<IGraphicBufferProducer> gbp =
- interface_cast<IGraphicBufferProducer>(binder);
- surface = new Surface(gbp);
+ if (surfaceShim.graphicBufferProducer != NULL) {
+ surface = new Surface(surfaceShim.graphicBufferProducer);
}
mSurfaceList.push_back(surface);
@@ -99,7 +96,7 @@
return BAD_VALUE;
}
- status_t err;
+ status_t err = OK;
if ((err = mMetadata.writeToParcel(parcel)) != OK) {
return err;
@@ -111,20 +108,18 @@
parcel->writeInt32(size);
for (int32_t i = 0; i < size; ++i) {
- sp<Surface> surface = mSurfaceList[i];
-
- sp<IBinder> binder;
- if (surface != 0) {
- binder = IInterface::asBinder(surface->getIGraphicBufferProducer());
- }
-
// not sure if readParcelableArray does this, hard to tell from source
parcel->writeString16(String16("android.view.Surface"));
// Surface.writeToParcel
- parcel->writeString16(String16("unknown_name"));
- // Surface.nativeWriteToParcel
- parcel->writeStrongBinder(binder);
+ view::Surface surfaceShim;
+ surfaceShim.name = String16("unknown_name");
+ surfaceShim.graphicBufferProducer = mSurfaceList[i]->getIGraphicBufferProducer();
+ if ((err = surfaceShim.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write output target Surface %d to parcel: %s (%d)",
+ __FUNCTION__, i, strerror(-err), err);
+ return err;
+ }
}
parcel->writeInt32(mIsReprocess ? 1 : 0);
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
index 40275cf..d27956c 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
@@ -24,6 +24,7 @@
#include "DrmPlugin.h"
#include "ClearKeyUUID.h"
+#include "MimeType.h"
#include "SessionLibrary.h"
namespace clearkeydrm {
@@ -32,10 +33,14 @@
return isClearKeyUUID(uuid);
}
-bool DrmFactory::isContentTypeSupported(const android::String8 &initDataType) {
+bool DrmFactory::isContentTypeSupported(const android::String8 &type) {
// This should match the types handed by InitDataParser.
- return initDataType == "cenc" ||
- initDataType == "webm";
+ return type == kIsoBmffVideoMimeType ||
+ type == kIsoBmffAudioMimeType ||
+ type == kCencInitDataFormat ||
+ type == kWebmVideoMimeType ||
+ type == kWebmAudioMimeType ||
+ type == kWebmInitDataFormat;
}
android::status_t DrmFactory::createDrmPlugin(
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.h b/drm/mediadrm/plugins/clearkey/DrmFactory.h
index 164d3d0..87db982 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.h
@@ -32,7 +32,7 @@
virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
- virtual bool isContentTypeSupported(const android::String8 &initDataType);
+ virtual bool isContentTypeSupported(const android::String8 &mimeType);
virtual android::status_t createDrmPlugin(
const uint8_t uuid[16], android::DrmPlugin** plugin);
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index e5ee403..86bf047 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -46,7 +46,7 @@
status_t DrmPlugin::getKeyRequest(
const Vector<uint8_t>& scope,
const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& mimeType,
KeyType keyType,
const KeyedVector<String8, String8>& optionalParameters,
Vector<uint8_t>& request,
@@ -62,7 +62,7 @@
if (!session.get()) {
return android::ERROR_DRM_SESSION_NOT_OPENED;
}
- return session->getKeyRequest(initData, initDataType, &request);
+ return session->getKeyRequest(initData, mimeType, &request);
}
status_t DrmPlugin::provideKeyResponse(
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index 9095045..efb9f8b 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -49,7 +49,7 @@
virtual status_t getKeyRequest(
const Vector<uint8_t>& scope,
const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& mimeType,
KeyType keyType,
const KeyedVector<String8, String8>& optionalParameters,
diff --git a/drm/mediadrm/plugins/clearkey/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
index c9c2a38..6a4f8d5 100644
--- a/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
+++ b/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
@@ -27,6 +27,7 @@
#include "InitDataParser.h"
#include "ClearKeyUUID.h"
+#include "MimeType.h"
#include "Utils.h"
namespace clearkeydrm {
@@ -41,16 +42,20 @@
}
android::status_t InitDataParser::parse(const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& type,
Vector<uint8_t>* licenseRequest) {
// Build a list of the key IDs
Vector<const uint8_t*> keyIds;
- if (initDataType == "cenc") {
+ if (type == kIsoBmffVideoMimeType ||
+ type == kIsoBmffAudioMimeType ||
+ type == kCencInitDataFormat) {
android::status_t res = parsePssh(initData, &keyIds);
if (res != android::OK) {
return res;
}
- } else if (initDataType == "webm") {
+ } else if (type == kWebmVideoMimeType ||
+ type == kWebmAudioMimeType ||
+ type == kWebmInitDataFormat) {
// WebM "init data" is just a single key ID
if (initData.size() != kKeyIdSize) {
return android::ERROR_DRM_CANNOT_HANDLE;
diff --git a/drm/mediadrm/plugins/clearkey/InitDataParser.h b/drm/mediadrm/plugins/clearkey/InitDataParser.h
index 9505d2a..a9707bf 100644
--- a/drm/mediadrm/plugins/clearkey/InitDataParser.h
+++ b/drm/mediadrm/plugins/clearkey/InitDataParser.h
@@ -29,7 +29,7 @@
InitDataParser() {}
android::status_t parse(const android::Vector<uint8_t>& initData,
- const android::String8& initDataType,
+ const android::String8& type,
android::Vector<uint8_t>* licenseRequest);
private:
diff --git a/drm/mediadrm/plugins/clearkey/MimeType.h b/drm/mediadrm/plugins/clearkey/MimeType.h
new file mode 100644
index 0000000..085f17a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/MimeType.h
@@ -0,0 +1,15 @@
+#ifndef CLEARKEY_MIMETYPE_H_
+#define CLEARKEY_MIMETYPE_H_
+
+#include <utils/String8.h>
+
+namespace {
+ const android::String8 kCencInitDataFormat("cenc");
+ const android::String8 kIsoBmffAudioMimeType("audio/mp4");
+ const android::String8 kIsoBmffVideoMimeType("video/mp4");
+ const android::String8 kWebmInitDataFormat("webm");
+ const android::String8 kWebmAudioMimeType("audio/webm");
+ const android::String8 kWebmVideoMimeType("video/webm");
+}
+
+#endif // CLEARKEY_MIMETYPE_H_
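With MimeType.h in place, the ClearKey factory and init data parser accept both the legacy EME init data format names ("cenc", "webm") and the corresponding ISO-BMFF and WebM MIME types. A minimal standalone sketch of that check, using std::string in place of android::String8 (the function name here is illustrative, not part of the plugin):

#include <array>
#include <string>

// Hypothetical stand-in for DrmFactory::isContentTypeSupported(): accept both
// the legacy init data format names and the matching container MIME types.
static bool isClearKeyTypeSupported(const std::string &type) {
    static const std::array<const char *, 6> kSupported = {{
        "cenc", "video/mp4", "audio/mp4",   // ISO-BMFF / CENC family
        "webm", "video/webm", "audio/webm", // WebM family
    }};
    for (const char *t : kSupported) {
        if (type == t) return true;
    }
    return false;
}
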
diff --git a/drm/mediadrm/plugins/clearkey/Session.cpp b/drm/mediadrm/plugins/clearkey/Session.cpp
index 95016f5..d210f5e 100644
--- a/drm/mediadrm/plugins/clearkey/Session.cpp
+++ b/drm/mediadrm/plugins/clearkey/Session.cpp
@@ -36,10 +36,10 @@
status_t Session::getKeyRequest(
const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& mimeType,
Vector<uint8_t>* keyRequest) const {
InitDataParser parser;
- return parser.parse(initData, initDataType, keyRequest);
+ return parser.parse(initData, mimeType, keyRequest);
}
status_t Session::provideKeyResponse(const Vector<uint8_t>& response) {
diff --git a/drm/mediadrm/plugins/clearkey/Session.h b/drm/mediadrm/plugins/clearkey/Session.h
index cab0dc3..0933506 100644
--- a/drm/mediadrm/plugins/clearkey/Session.h
+++ b/drm/mediadrm/plugins/clearkey/Session.h
@@ -38,7 +38,7 @@
const android::Vector<uint8_t>& sessionId() const { return mSessionId; }
android::status_t getKeyRequest(
const android::Vector<uint8_t>& initData,
- const android::String8& initDataType,
+ const android::String8& mimeType,
android::Vector<uint8_t>* keyRequest) const;
diff --git a/drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp b/drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp
index 4ba65ed..e275108 100644
--- a/drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp
+++ b/drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp
@@ -30,27 +30,27 @@
namespace {
const size_t kKeyIdSize = 16;
- const String8 kCencType("cenc");
- const String8 kWebMType("webm");
+ const String8 kCencMimeType("video/mp4");
+ const String8 kWebmMimeType("video/webm");
const String8 kBase64Padding("=");
}
class InitDataParserTest : public ::testing::Test {
protected:
status_t attemptParse(const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& mimeType,
Vector<uint8_t>* licenseRequest) {
InitDataParser parser;
- return parser.parse(initData, initDataType, licenseRequest);
+ return parser.parse(initData, mimeType, licenseRequest);
}
void attemptParseExpectingSuccess(const Vector<uint8_t>& initData,
- const String8& initDataType,
+ const String8& mimeType,
const Vector<String8>& expectedKeys) {
const String8 kRequestPrefix("{\"kids\":[");
const String8 kRequestSuffix("],\"type\":\"temporary\"}");
Vector<uint8_t> request;
- ASSERT_EQ(android::OK, attemptParse(initData, initDataType, &request));
+ ASSERT_EQ(android::OK, attemptParse(initData, mimeType, &request));
String8 requestString(reinterpret_cast<const char*>(request.array()),
request.size());
@@ -68,9 +68,9 @@
}
void attemptParseExpectingFailure(const Vector<uint8_t>& initData,
- const String8& initDataType) {
+ const String8& mimeType) {
Vector<uint8_t> request;
- ASSERT_NE(android::OK, attemptParse(initData, initDataType, &request));
+ ASSERT_NE(android::OK, attemptParse(initData, mimeType, &request));
EXPECT_EQ(0, request.size());
}
};
@@ -93,7 +93,7 @@
Vector<String8> expectedKeys;
expectedKeys.push(String8("01234567890ABCDE"));
- attemptParseExpectingSuccess(initData, kCencType, expectedKeys);
+ attemptParseExpectingSuccess(initData, kCencMimeType, expectedKeys);
}
TEST_F(InitDataParserTest, ParsesMultipleKeyPssh) {
@@ -120,7 +120,7 @@
expectedKeys.push(String8("ClearKeyClearKey"));
expectedKeys.push(String8(" GOOGLE GOOGLE "));
- attemptParseExpectingSuccess(initData, kCencType, expectedKeys);
+ attemptParseExpectingSuccess(initData, kCencMimeType, expectedKeys);
}
TEST_F(InitDataParserTest, ParsesWebM) {
@@ -134,7 +134,7 @@
Vector<String8> expectedKeys;
expectedKeys.push(String8("01234567890ABCDE"));
- attemptParseExpectingSuccess(initData, kWebMType, expectedKeys);
+ attemptParseExpectingSuccess(initData, kWebmMimeType, expectedKeys);
}
TEST_F(InitDataParserTest, FailsForPsshTooSmall) {
@@ -147,7 +147,7 @@
Vector<uint8_t> initData;
initData.appendArray(pssh, 16);
- attemptParseExpectingFailure(initData, kCencType);
+ attemptParseExpectingFailure(initData, kCencMimeType);
}
TEST_F(InitDataParserTest, FailsForWebMTooSmall) {
@@ -157,7 +157,7 @@
Vector<uint8_t> initData;
initData.appendArray(initDataRaw, 8);
- attemptParseExpectingFailure(initData, kWebMType);
+ attemptParseExpectingFailure(initData, kWebmMimeType);
}
TEST_F(InitDataParserTest, FailsForPsshBadSystemId) {
@@ -175,7 +175,7 @@
Vector<uint8_t> initData;
initData.appendArray(pssh, 52);
- attemptParseExpectingFailure(initData, kCencType);
+ attemptParseExpectingFailure(initData, kCencMimeType);
}
TEST_F(InitDataParserTest, FailsForPsshBadSize) {
@@ -193,7 +193,7 @@
Vector<uint8_t> initData;
initData.appendArray(pssh, 52);
- attemptParseExpectingFailure(initData, kCencType);
+ attemptParseExpectingFailure(initData, kCencMimeType);
}
TEST_F(InitDataParserTest, FailsForPsshWrongVersion) {
@@ -211,7 +211,7 @@
Vector<uint8_t> initData;
initData.appendArray(pssh, 52);
- attemptParseExpectingFailure(initData, kCencType);
+ attemptParseExpectingFailure(initData, kCencMimeType);
}
TEST_F(InitDataParserTest, FailsForPsshBadKeyCount) {
@@ -229,7 +229,7 @@
Vector<uint8_t> initData;
initData.appendArray(pssh, 52);
- attemptParseExpectingFailure(initData, kCencType);
+ attemptParseExpectingFailure(initData, kCencMimeType);
}
} // namespace clearkeydrm
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 75515ac..9fd5f61 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -193,8 +193,8 @@
TONE_JAPAN_DIAL, // Dial tone: 400Hz, continuous
TONE_JAPAN_BUSY, // Busy tone: 400Hz, 500ms ON, 500ms OFF...
TONE_JAPAN_RADIO_ACK, // Radio path acknowlegment: 400Hz, 1s ON, 2s OFF...
- // UK Supervisory tones
- TONE_UK_RINGTONE, // Ring Tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
+ // GB Supervisory tones
+ TONE_GB_RINGTONE, // Ring Tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
// AUSTRALIA Supervisory tones
TONE_AUSTRALIA_RINGTONE, // Ring tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
TONE_AUSTRALIA_BUSY, // Busy tone: 425 Hz repeated in a 0.375s on, 0.375s off pattern.
@@ -206,7 +206,7 @@
enum region {
ANSI,
JAPAN,
- UK,
+ GB,
AUSTRALIA,
CEPT,
NUM_REGIONS
@@ -313,7 +313,7 @@
short mA1_Q14; // Q14 coefficient
// delay line of full amplitude generator
- short mS1, mS2; // delay line S2 oldest
+ long mS1, mS2; // delay line S2 oldest
short mS2_0; // saved value for reinitialisation
short mAmplitude_Q15; // Q15 amplitude
};
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index ec0dad5..7bb9e8b 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -95,8 +95,7 @@
// install a callback to receive periodic captures. The capture rate is specified in milliHertz
// and the capture format is according to flags (see callback_flags).
- status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate,
- bool force = false);
+ status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate);
// set the capture size capture size must be a power of two in the range
// [VISUALIZER_CAPTURE_SIZE_MAX. VISUALIZER_CAPTURE_SIZE_MIN]
diff --git a/include/media/stagefright/MediaBuffer.h b/include/media/stagefright/MediaBuffer.h
index abfe068..a61ddaa 100644
--- a/include/media/stagefright/MediaBuffer.h
+++ b/include/media/stagefright/MediaBuffer.h
@@ -68,11 +68,16 @@
mMemory = mem;
}
- // Decrements the reference count and returns the buffer to its
- // associated MediaBufferGroup if the reference count drops to 0.
+ // If MediaBufferGroup is set, decrement the local reference count;
+ // if the local reference count drops to 0, return the buffer to the
+ // associated MediaBufferGroup.
+ //
+ // If no MediaBufferGroup is set, the local reference count must be zero
+ // when called, whereupon the MediaBuffer is deleted.
virtual void release();
- // Increments the reference count.
+ // Increments the local reference count.
+ // Use only when MediaBufferGroup is set.
virtual void add_ref();
void *data() const;
@@ -97,7 +102,28 @@
// MetaData.
MediaBuffer *clone();
- int refcount() const;
+ // sum of localRefcount() and remoteRefcount()
+ int refcount() const {
+ return localRefcount() + remoteRefcount();
+ }
+
+ int localRefcount() const {
+ return mRefCount;
+ }
+
+ int remoteRefcount() const {
+ if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+ int32_t remoteRefcount =
+ reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
+ // Sanity check so that remoteRefCount() is non-negative.
+ return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
+ }
+
+ // returns old value
+ int addRemoteRefcount(int32_t value) {
+ if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+ return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+ }
bool isDeadObject() const {
return isDeadObject(mMemory);
@@ -117,25 +143,6 @@
}
protected:
- // MediaBuffer remote releases are handled through a
- // pending release count variable stored in a SharedControl block
- // at the start of the IMemory.
-
- // Returns old value of pending release count.
- inline int32_t addPendingRelease(int32_t value) {
- return getSharedControl()->addPendingRelease(value);
- }
-
- // Issues all pending releases (works in parallel).
- // Assumes there is a MediaBufferObserver.
- inline void resolvePendingRelease() {
- if (mMemory.get() == nullptr) return;
- while (addPendingRelease(-1) > 0) {
- release();
- }
- addPendingRelease(1);
- }
-
// true if MediaBuffer is observed (part of a MediaBufferGroup).
inline bool isObserved() const {
return mObserver != nullptr;
@@ -181,18 +188,18 @@
};
// returns old value
- inline int32_t addPendingRelease(int32_t value) {
+ inline int32_t addRemoteRefcount(int32_t value) {
return std::atomic_fetch_add_explicit(
- &mPendingRelease, (int_least32_t)value, std::memory_order_seq_cst);
+ &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
}
- inline int32_t getPendingRelease() const {
- return std::atomic_load_explicit(&mPendingRelease, std::memory_order_seq_cst);
+ inline int32_t getRemoteRefcount() const {
+ return std::atomic_load_explicit(&mRemoteRefcount, std::memory_order_seq_cst);
}
- inline void setPendingRelease(int32_t value) {
+ inline void setRemoteRefcount(int32_t value) {
std::atomic_store_explicit(
- &mPendingRelease, (int_least32_t)value, std::memory_order_seq_cst);
+ &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
}
inline bool isDeadObject() const {
@@ -209,13 +216,13 @@
std::atomic_store_explicit(
&mFlags, (int_least32_t)0, std::memory_order_seq_cst);
std::atomic_store_explicit(
- &mPendingRelease, (int_least32_t)0, std::memory_order_seq_cst);
+ &mRemoteRefcount, (int_least32_t)0, std::memory_order_seq_cst);
}
private:
// Caution: atomic_int_fast32_t is 64 bits on LP64.
std::atomic_int_least32_t mFlags;
- std::atomic_int_least32_t mPendingRelease;
+ std::atomic_int_least32_t mRemoteRefcount;
int32_t unused[6]; // additional buffer space
};
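The MediaBuffer.h change replaces the pending-release mechanism with an explicit remote reference count kept in the SharedControl block at the start of the buffer's shared memory. A simplified standalone sketch of that block, using std::atomic and the same member names as the patch (this is not the actual AOSP definition):

#include <atomic>
#include <cstdint>

// Simplified control block placed at the start of the shared memory region.
// The remote process increments mRemoteRefcount when it wraps the buffer and
// decrements it when the wrapper is destroyed, so the owning process can tell
// whether any remote holder still references the memory.
struct SharedControl {
    std::atomic_int_least32_t mFlags{0};
    std::atomic_int_least32_t mRemoteRefcount{0};

    // returns the old value, like addRemoteRefcount() in the patch
    int32_t addRemoteRefcount(int32_t value) {
        return mRemoteRefcount.fetch_add(value, std::memory_order_seq_cst);
    }
    int32_t getRemoteRefcount() const {
        return mRemoteRefcount.load(std::memory_order_seq_cst);
    }
};
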
diff --git a/include/media/stagefright/MediaBufferGroup.h b/include/media/stagefright/MediaBufferGroup.h
index dfa31b2..3051406 100644
--- a/include/media/stagefright/MediaBufferGroup.h
+++ b/include/media/stagefright/MediaBufferGroup.h
@@ -53,10 +53,7 @@
size_t buffers() const { return mBuffers.size(); }
- // freeBuffers is the number of free buffers allowed to remain.
- void gc(size_t freeBuffers = 0);
-
-protected:
+ // If buffer is nullptr, have acquire_buffer() check for remote release.
virtual void signalBufferReturned(MediaBuffer *buffer);
private:
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ffdb9b5..7becf57 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -522,6 +522,12 @@
mTimestampMutator.push(timestamp);
}
+ // Flushes the shared ring buffer if the client had requested it using mStreaming.mFlush.
+ // If flush occurs then:
+ // cblk->u.mStreaming.mFront, ServerProxy::mFlush and ServerProxy::mFlushed will be modified
+ // client will be notified via Futex
+ virtual void flushBufferIfNeeded();
+
// Total count of the number of flushed frames since creation (never reset).
virtual int64_t framesFlushed() const { return mFlushed; }
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 7119517..846f8b8 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -622,6 +622,56 @@
}
__attribute__((no_sanitize("integer")))
+void ServerProxy::flushBufferIfNeeded()
+{
+ audio_track_cblk_t* cblk = mCblk;
+ // The acquire_load is not really required. But since the write is a release_store in the
+ // client, using acquire_load here makes it easier for people to maintain the code,
+ // and the logic for communicating ipc variables seems somewhat standard,
+ // and there really isn't much penalty for 4 or 8 byte atomics.
+ int32_t flush = android_atomic_acquire_load(&cblk->u.mStreaming.mFlush);
+ if (flush != mFlush) {
+ ALOGV("ServerProxy::flushBufferIfNeeded() mStreaming.mFlush = 0x%x, mFlush = 0x%0x",
+ flush, mFlush);
+ int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ int32_t front = cblk->u.mStreaming.mFront;
+
+ // effectively obtain then release whatever is in the buffer
+ const size_t overflowBit = mFrameCountP2 << 1;
+ const size_t mask = overflowBit - 1;
+ int32_t newFront = (front & ~mask) | (flush & mask);
+ ssize_t filled = rear - newFront;
+ if (filled >= (ssize_t)overflowBit) {
+ // front and rear offsets span the overflow bit of the p2 mask
+ // so rebasing newFront on the front offset is off by the overflow bit.
+ // adjust newFront to match rear offset.
+ ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+ newFront += overflowBit;
+ filled -= overflowBit;
+ }
+ // Rather than shutting down on a corrupt flush, just treat it as a full flush
+ if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+ ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
+ "filled %zd=%#x",
+ mFlush, flush, front, rear,
+ (unsigned)mask, newFront, filled, (unsigned)filled);
+ newFront = rear;
+ }
+ mFlush = flush;
+ android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
+ // There is no danger from a false positive, so err on the side of caution
+ if (true /*front != newFront*/) {
+ int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
+ if (!(old & CBLK_FUTEX_WAKE)) {
+ (void) syscall(__NR_futex, &cblk->mFutex,
+ mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+ }
+ }
+ mFlushed += (newFront - front) & mask;
+ }
+}
+
+__attribute__((no_sanitize("integer")))
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
@@ -636,44 +686,9 @@
int32_t rear;
// See notes on barriers at ClientProxy::obtainBuffer()
if (mIsOut) {
- int32_t flush = cblk->u.mStreaming.mFlush;
+ flushBufferIfNeeded(); // might modify mFront
rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
front = cblk->u.mStreaming.mFront;
- if (flush != mFlush) {
- // effectively obtain then release whatever is in the buffer
- const size_t overflowBit = mFrameCountP2 << 1;
- const size_t mask = overflowBit - 1;
- int32_t newFront = (front & ~mask) | (flush & mask);
- ssize_t filled = rear - newFront;
- if (filled >= (ssize_t)overflowBit) {
- // front and rear offsets span the overflow bit of the p2 mask
- // so rebasing newFront on the front offset is off by the overflow bit.
- // adjust newFront to match rear offset.
- ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
- newFront += overflowBit;
- filled -= overflowBit;
- }
- // Rather than shutting down on a corrupt flush, just treat it as a full flush
- if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
- ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
- "filled %zd=%#x",
- mFlush, flush, front, rear,
- (unsigned)mask, newFront, filled, (unsigned)filled);
- newFront = rear;
- }
- mFlush = flush;
- android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
- // There is no danger from a false positive, so err on the side of caution
- if (true /*front != newFront*/) {
- int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
- if (!(old & CBLK_FUTEX_WAKE)) {
- (void) syscall(__NR_futex, &cblk->mFutex,
- mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
- }
- }
- mFlushed += (newFront - front) & mask;
- front = newFront;
- }
} else {
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
rear = cblk->u.mStreaming.mRear;
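The new ServerProxy::flushBufferIfNeeded() pulls the flush handling out of obtainBuffer(); its core is offset arithmetic on a power-of-two ring buffer where one extra bit distinguishes a full buffer from an empty one. A toy, single-threaded version of just that arithmetic (atomics and the futex wake are omitted; this is not the AOSP code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// front/rear/flush are free-running offsets; only the low bits index the ring
// buffer, and overflowBit disambiguates full from empty. frameCount is assumed
// to be a power of two.
static int32_t flushedFront(int32_t front, int32_t rear, int32_t flush,
                            size_t frameCount) {
    const size_t overflowBit = frameCount << 1;
    const size_t mask = overflowBit - 1;
    int32_t newFront = (front & ~(int32_t)mask) | (flush & (int32_t)mask);
    ptrdiff_t filled = rear - newFront;
    if (filled >= (ptrdiff_t)overflowBit) {
        // front and rear straddle the overflow bit; rebase newFront to match rear
        newFront += (int32_t)overflowBit;
        filled -= (ptrdiff_t)overflowBit;
    }
    if (!(0 <= filled && (size_t)filled <= frameCount)) {
        newFront = rear;  // corrupt flush offset: treat it as a full flush
    }
    return newFront;
}

int main() {
    // 8-frame buffer: server is at frame 3, client wrote up to 10 and asks to
    // flush everything it queued, so the new front should equal the rear.
    std::printf("newFront = %d\n", flushedFront(3, 10, 10, 8));
    return 0;
}
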
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 26dd2c9..302e6ee 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -150,10 +150,10 @@
if (isCryptoError(result)) {
errorDetailMsg->setTo(reply.readCString());
- }
-
- if (dstType == kDestinationTypeVmPointer && result >= 0) {
- reply.read(dstPtr, result);
+ } else if (dstType == kDestinationTypeVmPointer) {
+ // For the non-secure case, copy the decrypted
+ // data from shared memory to its final destination
+ memcpy(dstPtr, sharedBuffer->pointer(), result);
}
return result;
@@ -369,7 +369,11 @@
if (dstType == kDestinationTypeVmPointer) {
if (result >= 0) {
CHECK_LE(result, static_cast<ssize_t>(totalSize));
- reply->write(dstPtr, result);
+ // For the non-secure case, pass the decrypted
+ // data back via the shared buffer rather than
+ // copying it separately over binder to avoid
+ // binder's 1MB limit.
+ memcpy(sharedBuffer->pointer(), dstPtr, result);
}
free(dstPtr);
dstPtr = NULL;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index dd94ccf..595bad9 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -58,9 +58,9 @@
protected:
virtual ~RemoteMediaBufferWrapper() {
- // Indicate to MediaBufferGroup to release.
- int32_t old = addPendingRelease(1);
- ALOGV("RemoteMediaBufferWrapper: releasing %p, old %d", this, old);
+ // Release our interest in the MediaBuffer's shared memory.
+ int32_t old = addRemoteRefcount(-1);
+ ALOGV("RemoteMediaBufferWrapper: releasing %p, refcount %d", this, old - 1);
mMemory.clear(); // don't set the dead object flag.
}
};
@@ -296,8 +296,8 @@
case STOP: {
ALOGV("stop");
CHECK_INTERFACE(IMediaSource, data, reply);
+ mGroup->signalBufferReturned(nullptr);
status_t status = stop();
- mGroup->gc();
mIndexCache.reset();
mBuffersSinceStop = 0;
return status;
@@ -305,6 +305,7 @@
case PAUSE: {
ALOGV("pause");
CHECK_INTERFACE(IMediaSource, data, reply);
+ mGroup->signalBufferReturned(nullptr);
return pause();
}
case GETFORMAT: {
@@ -336,7 +337,7 @@
&& len == sizeof(opts)
&& data.read((void *)&opts, len) == NO_ERROR;
- mGroup->gc(kBinderMediaBuffers /* freeBuffers */);
+ mGroup->signalBufferReturned(nullptr);
mIndexCache.gc();
size_t inlineTransferSize = 0;
status_t ret = NO_ERROR;
@@ -411,8 +412,9 @@
reply->writeInt32(offset);
reply->writeInt32(length);
buf->meta_data()->writeToParcel(*reply);
+ transferBuf->addRemoteRefcount(1);
if (transferBuf != buf) {
- buf->release();
+ transferBuf->release(); // release local ref
} else if (!supportNonblockingRead()) {
maxNumBuffers = 0; // stop readMultiple with one shared buffer.
}
@@ -423,12 +425,12 @@
reply->writeInt32(INLINE_BUFFER);
reply->writeByteArray(length, (uint8_t*)buf->data() + offset);
buf->meta_data()->writeToParcel(*reply);
- buf->release();
inlineTransferSize += length;
if (inlineTransferSize > kInlineMaxTransfer) {
maxNumBuffers = 0; // stop readMultiple if inline transfer is too large.
}
}
+ buf->release();
}
reply->writeInt32(NULL_BUFFER); // Indicate no more MediaBuffers.
reply->writeInt32(ret);
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 2f53637..7a72237 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -746,7 +746,7 @@
{ .duration = 2000, .waveFreq = { 0 }, 0, 0},
{ .duration = 0, .waveFreq = { 0 }, 0, 0}},
.repeatCnt = ToneGenerator::TONEGEN_INF,
- .repeatSegment = 0 }, // TONE_UK_RINGTONE
+ .repeatSegment = 0 }, // TONE_GB_RINGTONE
{ .segments = { { .duration = 400, .waveFreq = { 400, 450, 0 }, 0, 0 },
{ .duration = 200, .waveFreq = { 0 }, 0, 0 },
{ .duration = 400, .waveFreq = { 400, 450, 0 }, 0, 0 },
@@ -796,7 +796,7 @@
TONE_SUP_CALL_WAITING, // TONE_SUP_CALL_WAITING
TONE_SUP_RINGTONE // TONE_SUP_RINGTONE
},
- { // UK
+ { // GB
TONE_SUP_DIAL, // TONE_SUP_DIAL
TONE_SUP_BUSY, // TONE_SUP_BUSY
TONE_SUP_CONGESTION, // TONE_SUP_CONGESTION
@@ -804,7 +804,7 @@
TONE_SUP_RADIO_NOTAVAIL, // TONE_SUP_RADIO_NOTAVAIL
TONE_SUP_ERROR, // TONE_SUP_ERROR
TONE_SUP_CALL_WAITING, // TONE_SUP_CALL_WAITING
- TONE_UK_RINGTONE // TONE_SUP_RINGTONE
+ TONE_GB_RINGTONE // TONE_SUP_RINGTONE
},
{ // AUSTRALIA
TONE_ANSI_DIAL, // TONE_SUP_DIAL
@@ -869,8 +869,8 @@
mRegion = ANSI;
} else if (strstr(value, "jp") != NULL) {
mRegion = JAPAN;
- } else if (strstr(value, "uk") != NULL) {
- mRegion = UK;
+ } else if (strstr(value, "gb") != NULL) {
+ mRegion = GB;
} else if (strstr(value, "au") != NULL) {
mRegion = AUSTRALIA;
} else {
@@ -1612,8 +1612,8 @@
lS1 = (long)0;
lS2 = (long)mS2_0;
} else {
- lS1 = (long)mS1;
- lS2 = (long)mS2;
+ lS1 = mS1;
+ lS2 = mS2;
}
lA1 = (long)mA1_Q14;
lAmplitude = (long)mAmplitude_Q15;
@@ -1649,8 +1649,8 @@
}
// save status
- mS1 = (short)lS1;
- mS2 = (short)lS2;
+ mS1 = lS1;
+ mS2 = lS2;
}
} // end namespace android
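Widening mS1/mS2 from short to long keeps the oscillator's delay line from being truncated when the WaveGenerator state is saved between buffers. A self-contained toy of the underlying Q14 sine recurrence, with illustrative frequency and amplitude values (not the AOSP generator):

#include <cmath>
#include <cstdio>

// Toy version of ToneGenerator's recursive sine generator:
//   s[n] = ((2*cos(w) in Q14) * s[n-1] >> 14) - s[n-2]
// The whole delay-line state is kept in long here, as the patch does for the
// saved state.
int main() {
    const double kPi = 3.14159265358979323846;
    const double freq = 400.0, sampleRate = 48000.0;
    const double w = 2.0 * kPi * freq / sampleRate;

    const long a1_Q14 = (long)std::lround(2.0 * std::cos(w) * (1 << 14)); // 2*cos(w), Q14
    long s1 = 0;                                          // s[n-1]
    long s2 = (long)std::lround(std::sin(w) * 32767.0);   // s[n-2], seeds the amplitude

    for (int n = 0; n < 8; ++n) {
        const long s0 = ((a1_Q14 * s1) >> 14) - s2;       // next sample of the sinusoid
        s2 = s1;
        s1 = s0;
        std::printf("sample[%d] = %ld\n", n, s0);
    }
    return 0;
}
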
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 31e310b..37bf0bd 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -55,7 +55,7 @@
{
ALOGV("Visualizer::~Visualizer()");
setEnabled(false);
- setCaptureCallBack(NULL, NULL, 0, 0, true);
+ setCaptureCallBack(NULL, NULL, 0, 0);
}
status_t Visualizer::setEnabled(bool enabled)
@@ -77,13 +77,11 @@
status_t status = AudioEffect::setEnabled(enabled);
- if (status == NO_ERROR) {
- if (t != 0) {
- if (enabled) {
- t->run("Visualizer");
- } else {
- t->requestExit();
- }
+ if (t != 0) {
+ if (enabled && status == NO_ERROR) {
+ t->run("Visualizer");
+ } else {
+ t->requestExit();
}
}
@@ -95,14 +93,14 @@
}
status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
- uint32_t rate, bool force)
+ uint32_t rate)
{
if (rate > CAPTURE_RATE_MAX) {
return BAD_VALUE;
}
Mutex::Autolock _l(mCaptureLock);
- if (force || mEnabled) {
+ if (mEnabled) {
return INVALID_OPERATION;
}
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 3fffdc1a..5027e01 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -30,6 +30,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/Utils.h>
namespace android {
@@ -100,26 +101,38 @@
void NuPlayer::HTTPLiveSource::start() {
}
-sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
- sp<AMessage> format;
- status_t err = -EWOULDBLOCK;
+sp<MetaData> NuPlayer::HTTPLiveSource::getFormatMeta(bool audio) {
+ sp<MetaData> meta;
if (mLiveSession != NULL) {
- err = mLiveSession->getStreamFormat(
+ mLiveSession->getStreamFormatMeta(
audio ? LiveSession::STREAMTYPE_AUDIO
: LiveSession::STREAMTYPE_VIDEO,
- &format);
+ &meta);
}
+ return meta;
+}
+
+sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
+ sp<MetaData> meta;
+ status_t err = -EWOULDBLOCK;
+ if (mLiveSession != NULL) {
+ err = mLiveSession->getStreamFormatMeta(
+ audio ? LiveSession::STREAMTYPE_AUDIO
+ : LiveSession::STREAMTYPE_VIDEO,
+ &meta);
+ }
+
+ sp<AMessage> format;
if (err == -EWOULDBLOCK) {
format = new AMessage();
format->setInt32("err", err);
return format;
}
- if (err != OK) {
+ if (err != OK || convertMetaDataToMessage(meta, &format) != OK) {
return NULL;
}
-
return format;
}
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 9e0ec2f..574937d 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -38,6 +38,7 @@
virtual void start();
virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+ virtual sp<MetaData> getFormatMeta(bool audio);
virtual sp<AMessage> getFormat(bool audio);
virtual status_t feedMoreTSData();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 134da14..dc4e5d4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1339,7 +1339,16 @@
}
sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ sp<MetaData> videoMeta = mSource->getFormatMeta(false /* audio */);
+ if (audioMeta == NULL && videoMeta == NULL) {
+ ALOGE("no metadata for either audio or video source");
+ mSource->stop();
+ mSourceStarted = false;
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_MALFORMED);
+ return;
+ }
ALOGV_IF(audioMeta == NULL, "no metadata for audio source"); // video only stream
+
audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
if (mAudioSink != NULL) {
streamType = mAudioSink->getAudioStreamType();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index cf38efc..594128c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -754,7 +754,7 @@
status_t NuPlayer::Decoder::fetchInputData(sp<AMessage> &reply) {
sp<ABuffer> accessUnit;
- bool dropAccessUnit;
+ bool dropAccessUnit = true;
do {
status_t err = mSource->dequeueAccessUnit(mIsAudio, &accessUnit);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index b47a4f1..b742762 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -141,6 +141,17 @@
mAudioSink->flush();
mAudioSink->close();
}
+
+ // Try to avoid a race condition in case the callback is still active.

+ Mutex::Autolock autoLock(mLock);
+ mUseAudioCallback = false;
+ flushQueue(&mAudioQueue);
+ flushQueue(&mVideoQueue);
+ mWakeLock.clear();
+ mMediaClock.clear();
+ mVideoScheduler.clear();
+ mNotify.clear();
+ mAudioSink.clear();
}
void NuPlayer::Renderer::queueBuffer(
@@ -744,7 +755,7 @@
case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
{
ALOGV("AudioSink::CB_EVENT_STREAM_END");
- me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
+ me->notifyEOSCallback();
break;
}
@@ -759,6 +770,16 @@
return 0;
}
+void NuPlayer::Renderer::notifyEOSCallback() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (!mUseAudioCallback) {
+ return;
+ }
+
+ notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
+}
+
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
Mutex::Autolock autoLock(mLock);
@@ -1190,8 +1211,10 @@
msg->setWhat(kWhatPostDrainVideoQueue);
msg->post(postDelayUs);
mVideoScheduler->restart();
- ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms",
- (int)(delayUs / 1000), (int)(postDelayUs / 1000));
+ ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
+ " retrying in %dms",
+ (int)(delayUs / 1000), (long long)mediaTimeUs,
+ (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
mDrainVideoQueuePending = true;
return;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 004e21c..fe7f8fa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -212,6 +212,7 @@
status_t getCurrentPositionFromAnchor(
int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
+ void notifyEOSCallback();
size_t fillAudioBuffer(void *buffer, size_t size);
bool onDrainAudioQueue();
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 8a305de..c4e5df7 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -58,6 +58,7 @@
mDisconnectReplyID(0),
mBuffering(false),
mInPreparationPhase(true),
+ mEOSPending(false),
mSeekGeneration(0),
mEOSTimeoutAudio(0),
mEOSTimeoutVideo(0) {
@@ -200,34 +201,28 @@
status_t finalResult;
if (!source->hasBufferAvailable(&finalResult)) {
if (finalResult == OK) {
- int64_t mediaDurationUs = 0;
- getDuration(&mediaDurationUs);
- sp<AnotherPacketSource> otherSource = getSource(!audio);
- status_t otherFinalResult;
- // If other source already signaled EOS, this source should also signal EOS
- if (otherSource != NULL &&
- !otherSource->hasBufferAvailable(&otherFinalResult) &&
- otherFinalResult == ERROR_END_OF_STREAM) {
- source->signalEOS(ERROR_END_OF_STREAM);
+ // If other source already signaled EOS, this source should also return EOS
+ if (sourceReachedEOS(!audio)) {
return ERROR_END_OF_STREAM;
}
// If this source has detected near end, give it some time to retrieve more
- // data before signaling EOS
+ // data before returning EOS
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
if (source->isFinished(mediaDurationUs)) {
int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
if (eosTimeout == 0) {
setEOSTimeout(audio, ALooper::GetNowUs());
} else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) {
setEOSTimeout(audio, 0);
- source->signalEOS(ERROR_END_OF_STREAM);
return ERROR_END_OF_STREAM;
}
return -EWOULDBLOCK;
}
- if (!(otherSource != NULL && otherSource->isFinished(mediaDurationUs))) {
+ if (!sourceNearEOS(!audio)) {
// We should not enter buffering mode
// if any of the sources already have detected EOS.
startBufferingIfNecessary();
@@ -306,6 +301,7 @@
mState = SEEKING;
mHandler->seek(seekTimeUs);
+ mEOSPending = false;
}
void NuPlayer::RTSPSource::schedulePollBuffering() {
@@ -314,10 +310,10 @@
}
void NuPlayer::RTSPSource::checkBuffering(
- bool *prepared, bool *underflow, bool *overflow, bool *startServer) {
+ bool *prepared, bool *underflow, bool *overflow, bool *startServer, bool *finished) {
size_t numTracks = mTracks.size();
- size_t preparedCount, underflowCount, overflowCount, startCount;
- preparedCount = underflowCount = overflowCount = startCount = 0;
+ size_t preparedCount, underflowCount, overflowCount, startCount, finishedCount;
+ preparedCount = underflowCount = overflowCount = startCount = finishedCount = 0;
size_t count = numTracks;
for (size_t i = 0; i < count; ++i) {
@@ -337,6 +333,7 @@
if (src->isFinished(/* duration */ 0)) {
++overflowCount;
+ ++finishedCount;
} else {
if (bufferedDurationUs < kUnderflowMarkUs) {
++underflowCount;
@@ -354,11 +351,12 @@
*underflow = (underflowCount > 0);
*overflow = (overflowCount == numTracks);
*startServer = (startCount > 0);
+ *finished = (finishedCount > 0);
}
void NuPlayer::RTSPSource::onPollBuffering() {
- bool prepared, underflow, overflow, startServer;
- checkBuffering(&prepared, &underflow, &overflow, &startServer);
+ bool prepared, underflow, overflow, startServer, finished;
+ checkBuffering(&prepared, &underflow, &overflow, &startServer, &finished);
if (prepared && mInPreparationPhase) {
mInPreparationPhase = false;
@@ -369,8 +367,11 @@
startBufferingIfNecessary();
}
- if (overflow && mHandler != NULL) {
+ if (haveSufficientDataOnAllTracks()) {
stopBufferingIfNecessary();
+ }
+
+ if (overflow && mHandler != NULL) {
mHandler->pause();
}
@@ -378,9 +379,72 @@
mHandler->resume();
}
+ if (finished && mHandler != NULL) {
+ mHandler->cancelAccessUnitTimeoutCheck();
+ }
+
schedulePollBuffering();
}
+void NuPlayer::RTSPSource::signalSourceEOS(status_t result) {
+ const bool audio = true;
+ const bool video = false;
+
+ sp<AnotherPacketSource> source = getSource(audio);
+ if (source != NULL) {
+ source->signalEOS(result);
+ }
+
+ source = getSource(video);
+ if (source != NULL) {
+ source->signalEOS(result);
+ }
+}
+
+bool NuPlayer::RTSPSource::sourceReachedEOS(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+ status_t finalResult;
+ return (source != NULL &&
+ !source->hasBufferAvailable(&finalResult) &&
+ finalResult == ERROR_END_OF_STREAM);
+}
+
+bool NuPlayer::RTSPSource::sourceNearEOS(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ return (source != NULL && source->isFinished(mediaDurationUs));
+}
+
+void NuPlayer::RTSPSource::onSignalEOS(const sp<AMessage> &msg) {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mSeekGeneration) {
+ return;
+ }
+
+ if (mEOSPending) {
+ signalSourceEOS(ERROR_END_OF_STREAM);
+ mEOSPending = false;
+ }
+}
+
+void NuPlayer::RTSPSource::postSourceEOSIfNecessary() {
+ const bool audio = true;
+ const bool video = false;
+ // If a source has detected near end, give it some time to retrieve more
+ // data before signaling EOS
+ if (sourceNearEOS(audio) || sourceNearEOS(video)) {
+ if (!mEOSPending) {
+ sp<AMessage> msg = new AMessage(kWhatSignalEOS, this);
+ msg->setInt32("generation", mSeekGeneration);
+ msg->post(kNearEOSTimeoutUs);
+ mEOSPending = true;
+ }
+ }
+}
+
void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
if (msg->what() == kWhatDisconnect) {
sp<AReplyToken> replyID;
@@ -408,6 +472,9 @@
} else if (msg->what() == kWhatPollBuffering) {
onPollBuffering();
return;
+ } else if (msg->what() == kWhatSignalEOS) {
+ onSignalEOS(msg);
+ return;
}
CHECK_EQ(msg->what(), (int)kWhatNotify);
@@ -517,16 +584,10 @@
}
if (err != OK) {
- sp<AnotherPacketSource> source = getSource(false /* audio */);
- if (source != NULL) {
- source->signalEOS(err);
- }
-
- source = getSource(true /* audio */);
- if (source != NULL) {
- source->signalEOS(err);
- }
+ signalSourceEOS(err);
}
+
+ postSourceEOSIfNecessary();
break;
}
@@ -554,6 +615,7 @@
source->queueAccessUnit(accessUnit);
}
+ postSourceEOSIfNecessary();
break;
}
@@ -564,17 +626,7 @@
CHECK_NE(finalResult, (status_t)OK);
if (mTSParser != NULL) {
- sp<AnotherPacketSource> source = getSource(false /* audio */);
- if (source != NULL) {
- source->signalEOS(finalResult);
- }
-
- source = getSource(true /* audio */);
- if (source != NULL) {
- source->signalEOS(finalResult);
- }
-
- return;
+ signalSourceEOS(finalResult);
}
size_t trackIndex;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index a6a7644..c7834ef 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -64,6 +64,7 @@
kWhatDisconnect = 'disc',
kWhatPerformSeek = 'seek',
kWhatPollBuffering = 'poll',
+ kWhatSignalEOS = 'eos ',
};
enum State {
@@ -106,6 +107,7 @@
Mutex mBufferingLock;
bool mBuffering;
bool mInPreparationPhase;
+ bool mEOSPending;
sp<ALooper> mLooper;
sp<MyHandler> mHandler;
@@ -133,7 +135,12 @@
void performSeek(int64_t seekTimeUs);
void schedulePollBuffering();
- void checkBuffering(bool *prepared, bool *underflow, bool *overflow, bool *startServer);
+ void checkBuffering(
+ bool *prepared,
+ bool *underflow,
+ bool *overflow,
+ bool *startServer,
+ bool *finished);
void onPollBuffering();
bool haveSufficientDataOnAllTracks();
@@ -144,6 +151,13 @@
bool stopBufferingIfNecessary();
void finishSeek(status_t err);
+ void postSourceEOSIfNecessary();
+ void signalSourceEOS(status_t result);
+ void onSignalEOS(const sp<AMessage> &msg);
+
+ bool sourceNearEOS(bool audio);
+ bool sourceReachedEOS(bool audio);
+
DISALLOW_EVIL_CONSTRUCTORS(RTSPSource);
};
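The RTSPSource change defers EOS: instead of signaling EOS as soon as a track looks near its end, it posts a delayed message stamped with the current seek generation, and a later seek simply invalidates the stamp. A minimal standalone model of that generation-guarded pattern (message posting and timers are omitted; names are illustrative):

#include <cstdint>
#include <cstdio>

struct EosState {
    int32_t seekGeneration = 0;
    bool eosPending = false;

    // Called when a track looks near EOS; remembers the generation it was armed in.
    int32_t armEosTimeout() {
        eosPending = true;
        return seekGeneration;            // stamp carried by the posted message
    }
    void onSeek() {
        ++seekGeneration;                 // invalidates anything already posted
        eosPending = false;
    }
    // Called when the delayed message fires.
    bool onEosTimeout(int32_t stampedGeneration) {
        if (stampedGeneration != seekGeneration || !eosPending) {
            return false;                 // stale: a seek happened in between
        }
        eosPending = false;
        return true;                      // really signal EOS to both sources
    }
};

int main() {
    EosState s;
    const int32_t stamp = s.armEosTimeout();
    s.onSeek();                           // user seeks before the timeout fires
    std::printf("signal EOS? %s\n", s.onEosTimeout(stamp) ? "yes" : "no"); // "no"
    return 0;
}
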
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 494fb45..b7d9965 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -1125,6 +1125,9 @@
int64_t token = IPCThreadState::self()->clearCallingIdentity();
mCamera->releaseRecordingFrameHandle(handle);
IPCThreadState::self()->restoreCallingIdentity(token);
+ } else {
+ native_handle_close(handle);
+ native_handle_delete(handle);
}
}
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 905c0ab..4a33e7a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -375,7 +375,6 @@
mMdatFound(false),
mDataSource(source),
mInitCheck(NO_INIT),
- mHasVideo(false),
mHeaderTimescale(0),
mIsQT(false),
mFirstTrack(NULL),
@@ -547,11 +546,13 @@
}
if (mInitCheck == OK) {
- if (mHasVideo) {
+ if (findTrackByMimePrefix("video/") != NULL) {
mFileMetaData->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
- } else {
+ } else if (findTrackByMimePrefix("audio/") != NULL) {
mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
+ } else {
+ mFileMetaData->setCString(kKeyMIMEType, "application/octet-stream");
}
} else {
mInitCheck = err;
@@ -1498,8 +1499,6 @@
case FOURCC('h', 'v', 'c', '1'):
case FOURCC('h', 'e', 'v', '1'):
{
- mHasVideo = true;
-
uint8_t buffer[78];
if (chunk_data_size < (ssize_t)sizeof(buffer)) {
// Basic VideoSampleEntry size.
@@ -4196,11 +4195,13 @@
if (!mDataSource->getUInt32(offset, &flags)) {
return ERROR_MALFORMED;
}
- ALOGV("fragment run flags: %08x", flags);
-
- if (flags & 0xff000000) {
- return -EINVAL;
- }
+ // |version| only affects SampleCompositionTimeOffset field.
+ // If version == 0, SampleCompositionTimeOffset is uint32_t;
+ // Otherwise, SampleCompositionTimeOffset is int32_t.
+ // Sample.compositionOffset is defined as int32_t.
+ uint8_t version = flags >> 24;
+ flags &= 0xffffff;
+ ALOGV("fragment run version: 0x%02x, flags: 0x%06x", version, flags);
if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
// These two shall not be used together.
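The trun parsing fix stops rejecting a non-zero version byte and instead splits the 32-bit word into version and flags, since the version only changes whether the sample composition time offset is read as unsigned or signed. A tiny standalone illustration with made-up values:

#include <cstdint>
#include <cstdio>

// ISO-BMFF FullBox: top byte is the box version, low 24 bits are flags.
// For trun, version 0 stores sampleCompositionTimeOffset as uint32_t and
// version 1 stores it as int32_t.
int main() {
    const uint32_t word = 0x01000205;          // example value, not real stream data
    const uint8_t version = word >> 24;        // 0x01
    const uint32_t flags = word & 0xffffff;    // 0x000205
    std::printf("version=0x%02x flags=0x%06x\n", version, flags);

    const uint32_t rawOffset = 0xfffffff6;     // example composition offset field
    const int64_t compositionOffset =
            (version == 0) ? (int64_t)rawOffset            // unsigned in v0
                           : (int64_t)(int32_t)rawOffset;  // signed in v1 (-10)
    std::printf("compositionOffset=%lld\n", (long long)compositionOffset);
    return 0;
}
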
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 4681abd..5e96c2b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -272,6 +272,7 @@
bool mIsHevc;
bool mIsAudio;
bool mIsMPEG4;
+ bool mIsMalformed;
int32_t mTrackId;
int64_t mTrackDurationUs;
int64_t mMaxChunkDurationUs;
@@ -1536,6 +1537,7 @@
mPaused(false),
mResumed(false),
mStarted(false),
+ mIsMalformed(false),
mTrackId(trackId),
mTrackDurationUs(0),
mEstimatedTrackSizeBytes(0),
@@ -2479,12 +2481,16 @@
ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
mOwner->mMaxFileSizeLimitBytes);
mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ copy->release();
+ mSource->stop();
break;
}
if (mOwner->exceedsFileDurationLimit()) {
ALOGW("Recorded file duration exceeds limit %" PRId64 "microseconds",
mOwner->mMaxFileDurationLimitUs);
mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ copy->release();
+ mSource->stop();
break;
}
@@ -2505,13 +2511,17 @@
int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
previousPausedDurationUs += pausedDurationUs - lastDurationUs;
@@ -2521,7 +2531,9 @@
timestampUs -= previousPausedDurationUs;
if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
if (!mIsAudio) {
@@ -2548,7 +2560,9 @@
timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
timestampUs = decodingTimeUs;
@@ -2560,7 +2574,9 @@
(cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
if (mStszTableEntries->count() == 0) {
@@ -2602,7 +2618,9 @@
if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
copy->release();
- return ERROR_MALFORMED;
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
@@ -2624,7 +2642,8 @@
(long long)timestampUs, (long long)lastTimestampUs, trackName);
copy->release();
mSource->stop();
- return UNKNOWN_ERROR;
+ mIsMalformed = true;
+ break;
}
// if the duration is different for this sample, see if it is close enough to the previous
@@ -2780,6 +2799,10 @@
}
bool MPEG4Writer::Track::isTrackMalFormed() const {
+ if (mIsMalformed) {
+ return true;
+ }
+
if (mStszTableEntries->count() == 0) { // no samples written
ALOGE("The number of recorded samples is 0");
return true;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index ff5c4d4..e476424 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2160,14 +2160,19 @@
CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
dstBuffers->clear();
- const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex];
+ // If we're using input surface (either non-persistent created by
+ // createInputSurface(), or persistent set by setInputSurface()),
+ // give the client an empty input buffers array.
+ if (portIndex != kPortIndexInput || !mHaveInputSurface) {
+ const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex];
- for (size_t i = 0; i < srcBuffers.size(); ++i) {
- const BufferInfo &info = srcBuffers.itemAt(i);
+ for (size_t i = 0; i < srcBuffers.size(); ++i) {
+ const BufferInfo &info = srcBuffers.itemAt(i);
- dstBuffers->push_back(
- (portIndex == kPortIndexInput && mCrypto != NULL)
- ? info.mEncryptedData : info.mData);
+ dstBuffers->push_back(
+ (portIndex == kPortIndexInput && mCrypto != NULL)
+ ? info.mEncryptedData : info.mData);
+ }
}
(new AMessage)->postReply(replyID);
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 33d624e..ea5ef06 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -859,6 +859,7 @@
}
MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
+ mbuf->setObserver(this);
mbuf->add_ref();
if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
@@ -911,7 +912,6 @@
mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
}
memcpy(mbuf->data(), outbuf->data(), outbuf->size());
- mbuf->setObserver(this);
{
Mutexed<Output>::Locked output(mOutput);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 55c1bc8..4558b3c 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -305,7 +305,10 @@
sp<IMediaSource> source = mImpl->getTrack(index);
- CHECK_EQ((status_t)OK, source->start());
+ status_t ret = source->start();
+ if (ret != OK) {
+ return ret;
+ }
mSelectedTracks.push();
TrackInfo *info = &mSelectedTracks.editItemAt(mSelectedTracks.size() - 1);
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e994069..e40dbcf 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -303,6 +303,12 @@
status_t MuxOMX::freeNode(node_id node) {
Mutex::Autolock autoLock(mLock);
+ // exit if we have already freed the node
+ if (mNodeLocation.indexOfKey(node) < 0) {
+ ALOGD("MuxOMX::freeNode: node %d seems to be released already --- ignoring.", node);
+ return OK;
+ }
+
status_t err = getOMX_l(node)->freeNode(node);
if (err != OK) {
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index d2ba02e..be5067d 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -158,11 +158,14 @@
// TODO: Use Flexible color instead
videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
- // For the thumbnail extraction case, try to allocate single buffer
- // in both input and output ports. NOTE: This request may fail if
- // component requires more than that for decoding.
- videoFormat->setInt32("android._num-input-buffers", 1);
- videoFormat->setInt32("android._num-output-buffers", 1);
+ // For the thumbnail extraction case, try to allocate single buffer in both
+ // input and output ports, if seeking to a sync frame. NOTE: This request may
+ // fail if component requires more than that for decoding.
+ bool isSeekingClosest = (seekMode == MediaSource::ReadOptions::SEEK_CLOSEST);
+ if (!isSeekingClosest) {
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
status_t err;
sp<ALooper> looper = new ALooper;
@@ -254,6 +257,9 @@
bool isAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
|| !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ bool firstSample = true;
+ int64_t targetTimeUs = -1ll;
+
do {
size_t inputIndex = -1;
int64_t ptsUs = 0ll;
@@ -280,6 +286,11 @@
haveMoreInputs = false;
break;
}
+ if (firstSample && isSeekingClosest) {
+ mediaBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs);
+ ALOGV("Seeking closest: targetTimeUs=%lld", (long long)targetTimeUs);
+ }
+ firstSample = false;
if (mediaBuffer->range_length() > codecBuffer->capacity()) {
ALOGE("buffer size (%zu) too large for codec input size (%zu)",
@@ -292,8 +303,9 @@
memcpy(codecBuffer->data(),
(const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
mediaBuffer->range_length());
- if (isAvcOrHevc && IsIDR(codecBuffer)) {
- // Only need to decode one IDR frame.
+ if (isAvcOrHevc && IsIDR(codecBuffer) && !isSeekingClosest) {
+ // Only need to decode one IDR frame, unless we're seeking with CLOSEST
+ // option, in which case we need to actually decode to targetTimeUs.
haveMoreInputs = false;
flags |= MediaCodec::BUFFER_FLAG_EOS;
}
@@ -340,8 +352,13 @@
ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
err = OK;
} else if (err == OK) {
- ALOGV("Received an output buffer");
- done = true;
+ // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
+ // from the extractor, decode to the specified frame. Otherwise we're done.
+ done = (targetTimeUs < 0ll) || (timeUs >= targetTimeUs);
+ ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
+ if (!done) {
+ err = decoder->releaseOutputBuffer(index);
+ }
} else {
ALOGW("Received error %d (%s) instead of output", err, asString(err));
done = true;
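For SEEK_CLOSEST the extractor seeks to the preceding sync frame and reports the originally requested time via kKeyTargetTime; the decode loop above then releases output frames until it reaches one at or past that target. A trivial standalone model of that loop, with made-up timestamps:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const int64_t targetTimeUs = 466700;       // as if read from kKeyTargetTime
    const std::vector<int64_t> outputTimesUs = {400000, 433366, 466733, 500100};

    for (int64_t timeUs : outputTimesUs) {
        const bool done = (targetTimeUs < 0) || (timeUs >= targetTimeUs);
        if (!done) {
            std::printf("release frame at %lld us (before target)\n", (long long)timeUs);
            continue;                          // keep decoding toward the target
        }
        std::printf("capture frame at %lld us\n", (long long)timeUs);
        break;
    }
    return 0;
}
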
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 60ef662..568837a 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -186,10 +186,6 @@
break;
}
- if (img == NULL) {
- ALOGE("error pushing blank frames: lock returned NULL buffer");
- break;
- }
*img = 0;
err = buf->unlock();
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index e329766..d7439b2 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -286,6 +286,7 @@
{ 15, ColorAspects::TransferSMPTE170M },
{ 16, ColorAspects::TransferST2084 },
{ 17, ColorAspects::TransferST428 },
+ { 18, ColorAspects::TransferHLG },
}
};
diff --git a/media/libstagefright/foundation/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
index 718b7e5..16000ef 100644
--- a/media/libstagefright/foundation/MediaBuffer.cpp
+++ b/media/libstagefright/foundation/MediaBuffer.cpp
@@ -105,14 +105,7 @@
void MediaBuffer::release() {
if (mObserver == NULL) {
- if (mMemory.get() != nullptr) {
- // See if there is a pending release and there are no observers.
- // Ideally this never happens.
- while (addPendingRelease(-1) > 0) {
- __sync_fetch_and_sub(&mRefCount, 1);
- }
- addPendingRelease(1);
- }
+ // Legacy contract for MediaBuffer without a MediaBufferGroup.
CHECK_EQ(mRefCount, 0);
delete this;
return;
@@ -205,10 +198,6 @@
mObserver = observer;
}
-int MediaBuffer::refcount() const {
- return mRefCount;
-}
-
MediaBuffer *MediaBuffer::clone() {
CHECK(mGraphicBuffer == NULL);
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index cb78879..54f768a 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -51,7 +51,7 @@
for (size_t i = 0; i < buffers; ++i) {
sp<IMemory> mem = memoryDealer->allocate(augmented_size);
- if (mem.get() == nullptr) {
+ if (mem.get() == nullptr || mem->pointer() == nullptr) {
ALOGW("Only allocated %zu shared buffers of size %zu", i, buffer_size);
break;
}
@@ -76,11 +76,24 @@
MediaBufferGroup::~MediaBufferGroup() {
for (MediaBuffer *buffer : mBuffers) {
- buffer->resolvePendingRelease();
- // If we don't release it, perhaps noone will release it.
- LOG_ALWAYS_FATAL_IF(buffer->refcount() != 0,
- "buffer refcount %p = %d != 0", buffer, buffer->refcount());
- // actually delete it.
+ if (buffer->refcount() != 0) {
+ const int localRefcount = buffer->localRefcount();
+ const int remoteRefcount = buffer->remoteRefcount();
+
+ // Fatal if we have a local refcount.
+ LOG_ALWAYS_FATAL_IF(localRefcount != 0,
+ "buffer(%p) localRefcount %d != 0, remoteRefcount %d",
+ buffer, localRefcount, remoteRefcount);
+
+ // Log an error if we have a remaining remote refcount,
+ // as the remote process may have died or may be misbehaving.
+ // The shared memory associated with the MediaBuffer will
+ // automatically be reclaimed when there are no remaining fds
+ // associated with it.
+ ALOGE("buffer(%p) has residual remoteRefcount %d",
+ buffer, remoteRefcount);
+ }
+ // gracefully delete.
buffer->setObserver(nullptr);
buffer->release();
}
@@ -94,32 +107,11 @@
// optionally: mGrowthLimit = max(mGrowthLimit, mBuffers.size());
}
-void MediaBufferGroup::gc(size_t freeBuffers) {
- Mutex::Autolock autoLock(mLock);
-
- size_t freeCount = 0;
- for (auto it = mBuffers.begin(); it != mBuffers.end(); ) {
- (*it)->resolvePendingRelease();
- if ((*it)->isDeadObject()) {
- // The MediaBuffer has been deleted, why is it in the MediaBufferGroup?
- LOG_ALWAYS_FATAL("buffer(%p) has dead object with refcount %d",
- (*it), (*it)->refcount());
- } else if ((*it)->refcount() == 0 && ++freeCount > freeBuffers) {
- (*it)->setObserver(nullptr);
- (*it)->release();
- it = mBuffers.erase(it);
- } else {
- ++it;
- }
- }
-}
-
bool MediaBufferGroup::has_buffers() {
if (mBuffers.size() < mGrowthLimit) {
return true; // We can add more buffers internally.
}
for (MediaBuffer *buffer : mBuffers) {
- buffer->resolvePendingRelease();
if (buffer->refcount() == 0) {
return true;
}
@@ -135,7 +127,6 @@
MediaBuffer *buffer = nullptr;
auto free = mBuffers.end();
for (auto it = mBuffers.begin(); it != mBuffers.end(); ++it) {
- (*it)->resolvePendingRelease();
if ((*it)->refcount() == 0) {
const size_t size = (*it)->size();
if (size >= requestedSize) {
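
A minimal standalone sketch of the teardown policy introduced above (not part of the change): a leftover local refcount is a programming error in this process and is fatal, while a leftover remote refcount is only logged, since the shared memory is reclaimed once the remote holder's file descriptors go away.

    #include <cstdio>
    #include <cstdlib>

    static void checkBufferAtGroupTeardown(int localRefcount, int remoteRefcount) {
        if (localRefcount != 0) {
            fprintf(stderr, "local refcount %d != 0, aborting\n", localRefcount);
            abort();  // stands in for LOG_ALWAYS_FATAL_IF
        }
        if (remoteRefcount != 0) {
            // stands in for the ALOGE; not fatal
            fprintf(stderr, "residual remote refcount %d\n", remoteRefcount);
        }
        // otherwise: release the buffer normally
    }

    int main() {
        checkBufferAtGroupTeardown(0, 1);  // logged, not fatal
        checkBufferAtGroupTeardown(0, 0);  // clean
        return 0;
    }
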
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index b4abc60..a8965f0 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -392,8 +392,12 @@
}
status_t MetaData::writeToParcel(Parcel &parcel) {
+ status_t ret;
size_t numItems = mItems.size();
- parcel.writeUint32(uint32_t(numItems));
+ ret = parcel.writeUint32(uint32_t(numItems));
+ if (ret) {
+ return ret;
+ }
for (size_t i = 0; i < numItems; i++) {
int32_t key = mItems.keyAt(i);
const typed_data &item = mItems.valueAt(i);
@@ -401,9 +405,32 @@
const void *data;
size_t size;
item.getData(&type, &data, &size);
- parcel.writeInt32(key);
- parcel.writeUint32(type);
- parcel.writeByteArray(size, (uint8_t*)data);
+ ret = parcel.writeInt32(key);
+ if (ret) {
+ return ret;
+ }
+ ret = parcel.writeUint32(type);
+ if (ret) {
+ return ret;
+ }
+ if (type == TYPE_NONE) {
+ android::Parcel::WritableBlob blob;
+ ret = parcel.writeUint32(static_cast<uint32_t>(size));
+ if (ret) {
+ return ret;
+ }
+ ret = parcel.writeBlob(size, false, &blob);
+ if (ret) {
+ return ret;
+ }
+ memcpy(blob.data(), data, size);
+ blob.release();
+ } else {
+ ret = parcel.writeByteArray(size, (uint8_t*)data);
+ if (ret) {
+ return ret;
+ }
+ }
}
return OK;
}
@@ -422,8 +449,20 @@
if (ret != OK) {
break;
}
- // copy data directly from Parcel storage, then advance position
- setData(key, type, parcel.readInplace(size), size);
+ // copy data from Blob, which may be inline in Parcel storage,
+ // then advance position
+ if (type == TYPE_NONE) {
+ android::Parcel::ReadableBlob blob;
+ ret = parcel.readBlob(size, &blob);
+ if (ret != OK) {
+ break;
+ }
+ setData(key, type, blob.data(), size);
+ blob.release();
+ } else {
+ // copy data directly from Parcel storage, then advance position
+ setData(key, type, parcel.readInplace(size), size);
+ }
}
return OK;
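
The write path above now routes TYPE_NONE payloads through a Parcel blob and checks every write. A rough standalone sketch of that shape follows; writeBlobItem, writeInlineItem, the status values and the TYPE_NONE tag value are illustrative stand-ins, not the real Parcel or MetaData API.

    #include <cstdint>
    #include <cstdio>

    enum { OK = 0, NO_MEMORY = -12 };  // illustrative status values

    static int writeBlobItem(size_t size)   { printf("blob of %zu bytes\n", size);  return OK; }
    static int writeInlineItem(size_t size) { printf("array of %zu bytes\n", size); return OK; }

    // TYPE_NONE (opaque) payloads go through a blob, which a Parcel can place
    // out of line for large sizes; everything else stays an inline byte array.
    static int writeItem(uint32_t type, size_t size) {
        const uint32_t TYPE_NONE = 0;  // illustrative tag value
        int ret = (type == TYPE_NONE) ? writeBlobItem(size) : writeInlineItem(size);
        if (ret) {
            return ret;  // mirrors the repeated "if (ret) { return ret; }" checks
        }
        return OK;
    }

    int main() {
        writeItem(0, 1 << 20);  // large opaque payload -> blob
        writeItem(1, 64);       // small typed payload -> inline byte array
        return 0;
    }
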
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index cebf95c..7abc019 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -467,28 +467,28 @@
return err;
}
-status_t LiveSession::getStreamFormat(StreamType stream, sp<AMessage> *format) {
+status_t LiveSession::getStreamFormatMeta(StreamType stream, sp<MetaData> *meta) {
if (!(mStreamMask & stream)) {
return UNKNOWN_ERROR;
}
sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
- sp<MetaData> meta = packetSource->getFormat();
+ *meta = packetSource->getFormat();
- if (meta == NULL) {
+ if (*meta == NULL) {
return -EWOULDBLOCK;
}
if (stream == STREAMTYPE_AUDIO) {
// set AAC input buffer size to 32K bytes (256kbps x 1sec)
- meta->setInt32(kKeyMaxInputSize, 32 * 1024);
+ (*meta)->setInt32(kKeyMaxInputSize, 32 * 1024);
} else if (stream == STREAMTYPE_VIDEO) {
- meta->setInt32(kKeyMaxWidth, mMaxWidth);
- meta->setInt32(kKeyMaxHeight, mMaxHeight);
+ (*meta)->setInt32(kKeyMaxWidth, mMaxWidth);
+ (*meta)->setInt32(kKeyMaxHeight, mMaxHeight);
}
- return convertMetaDataToMessage(meta, format);
+ return OK;
}
sp<HTTPDownloader> LiveSession::getHTTPDownloader() {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 90d56d0..b600eba 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -75,7 +75,7 @@
int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
- status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
+ status_t getStreamFormatMeta(StreamType stream, sp<MetaData> *meta);
sp<HTTPDownloader> getHTTPDownloader();
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 37c35e3..836cb08 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -93,7 +93,6 @@
sp<DataSource> mDataSource;
status_t mInitCheck;
- bool mHasVideo;
uint32_t mHeaderTimescale;
bool mIsQT;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index b863d67..844479e 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -701,6 +701,10 @@
}
mPayloadStarted = true;
+ // There should be at most 2 elements in |mPesStartOffsets|.
+ while (mPesStartOffsets.size() >= 2) {
+ mPesStartOffsets.erase(mPesStartOffsets.begin());
+ }
mPesStartOffsets.push_back(offset);
}
@@ -1104,15 +1108,20 @@
mSource->queueAccessUnit(accessUnit);
}
- if ((event != NULL) && !found && mQueue->getFormat() != NULL) {
+ // Every access unit has a pesStartOffset queued in |mPesStartOffsets|.
+ off64_t pesStartOffset = -1;
+ if (!mPesStartOffsets.empty()) {
+ pesStartOffset = *mPesStartOffsets.begin();
+ mPesStartOffsets.erase(mPesStartOffsets.begin());
+ }
+
+ if (pesStartOffset >= 0 && (event != NULL) && !found && mQueue->getFormat() != NULL) {
int32_t sync = 0;
if (accessUnit->meta()->findInt32("isSync", &sync) && sync) {
int64_t timeUs;
if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
found = true;
- off64_t pesStartOffset = *mPesStartOffsets.begin();
event->init(pesStartOffset, mSource, timeUs);
- mPesStartOffsets.erase(mPesStartOffsets.begin());
}
}
}
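
A standalone sketch of the bounded start-offset queue used above (std::deque and the sample offsets are illustrative; the real member holds off64_t PES start offsets):

    #include <cstdint>
    #include <cstdio>
    #include <deque>

    static void pushPesStartOffset(std::deque<int64_t> &offsets, int64_t offset) {
        // There should be at most 2 elements after the push.
        while (offsets.size() >= 2) {
            offsets.pop_front();
        }
        offsets.push_back(offset);
    }

    static int64_t popPesStartOffset(std::deque<int64_t> &offsets) {
        if (offsets.empty()) {
            return -1;  // caller skips the seek-event path in this case
        }
        int64_t offset = offsets.front();
        offsets.pop_front();
        return offset;
    }

    int main() {
        std::deque<int64_t> offsets;
        for (int64_t o : {100, 288, 476, 664}) {
            pushPesStartOffset(offsets, o);
        }
        printf("next=%lld size=%zu\n", (long long)popPesStartOffset(offsets), offsets.size());
        return 0;
    }
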
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index f1a1921..8b6c591 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -139,7 +139,6 @@
mRepeatLastFrameTimestamp(-1ll),
mLatestBufferId(-1),
mLatestBufferFrameNum(0),
- mLatestBufferUseCount(0),
mLatestBufferFence(Fence::NO_FENCE),
mRepeatBufferDeferred(false),
mTimePerCaptureUs(-1ll),
@@ -400,12 +399,16 @@
sp<Fence> fence = new Fence(fenceFd);
if (mBufferSlot[id] != NULL &&
mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
- ALOGV("cbi %d matches bq slot %d, handle=%p",
- cbi, id, mBufferSlot[id]->handle);
+ mBufferUseCount[id]--;
- if (id == mLatestBufferId) {
- CHECK_GT(mLatestBufferUseCount--, 0);
- } else {
+ ALOGV("codecBufferEmptied: slot=%d, cbi=%d, useCount=%d, handle=%p",
+ id, cbi, mBufferUseCount[id], mBufferSlot[id]->handle);
+
+ if (mBufferUseCount[id] < 0) {
+ ALOGW("mBufferUseCount for bq slot %d < 0 (=%d)", id, mBufferUseCount[id]);
+ mBufferUseCount[id] = 0;
+ }
+ if (id != mLatestBufferId && mBufferUseCount[id] == 0) {
releaseBuffer(id, codecBuffer.mFrameNumber, mBufferSlot[id], fence);
}
} else {
@@ -626,6 +629,7 @@
if (item.mGraphicBuffer != NULL) {
ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mSlot);
mBufferSlot[item.mSlot] = item.mGraphicBuffer;
+ mBufferUseCount[item.mSlot] = 0;
}
if (item.mDataSpace != mLastDataSpace) {
@@ -711,7 +715,7 @@
return false;
}
- ++mLatestBufferUseCount;
+ ++mBufferUseCount[item.mSlot];
/* repeat last frame up to kRepeatLastFrameCount times.
* in case of static scene, a single repeat might not get rid of encoder
@@ -732,10 +736,8 @@
void GraphicBufferSource::setLatestBuffer_l(
const BufferItem &item, bool dropped) {
- ALOGV("setLatestBuffer_l");
-
if (mLatestBufferId >= 0) {
- if (mLatestBufferUseCount == 0) {
+ if (mBufferUseCount[mLatestBufferId] == 0) {
releaseBuffer(mLatestBufferId, mLatestBufferFrameNum,
mBufferSlot[mLatestBufferId], mLatestBufferFence);
// mLatestBufferFence will be set to new fence just below
@@ -746,7 +748,13 @@
mLatestBufferFrameNum = item.mFrameNumber;
mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
- mLatestBufferUseCount = dropped ? 0 : 1;
+ if (!dropped) {
+ ++mBufferUseCount[item.mSlot];
+ }
+
+ ALOGV("setLatestBuffer_l: slot=%d, useCount=%d",
+ item.mSlot, mBufferUseCount[item.mSlot]);
+
mRepeatBufferDeferred = false;
mRepeatLastFrameCount = kRepeatLastFrameCount;
mLatestBufferFence = item.mFence;
@@ -839,11 +847,11 @@
int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
timeUs = (timestampGapUs < mMaxTimestampGapUs ?
timestampGapUs : mMaxTimestampGapUs) + mPrevModifiedTimeUs;
- mOriginalTimeUs.add(timeUs, originalTimeUs);
- ALOGV("IN timestamp: %lld -> %lld",
- static_cast<long long>(originalTimeUs),
- static_cast<long long>(timeUs));
}
+ mOriginalTimeUs.add(timeUs, originalTimeUs);
+ ALOGV("IN timestamp: %lld -> %lld",
+ static_cast<long long>(originalTimeUs),
+ static_cast<long long>(timeUs));
}
mPrevOriginalTimeUs = originalTimeUs;
@@ -854,7 +862,7 @@
}
status_t GraphicBufferSource::submitBuffer_l(const BufferItem &item, int cbi) {
- ALOGV("submitBuffer_l cbi=%d", cbi);
+ ALOGV("submitBuffer_l: slot=%d, cbi=%d", item.mSlot, cbi);
int64_t timeUs = getTimestamp(item);
if (timeUs < 0ll) {
@@ -947,6 +955,7 @@
void GraphicBufferSource::releaseBuffer(
int &id, uint64_t frameNum,
const sp<GraphicBuffer> buffer, const sp<Fence> &fence) {
+ ALOGV("releaseBuffer: slot=%d", id);
if (mIsPersistent) {
mConsumer->detachBuffer(id);
mBufferSlot[id] = NULL;
@@ -996,6 +1005,7 @@
if (item.mGraphicBuffer != NULL) {
ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mSlot);
mBufferSlot[item.mSlot] = item.mGraphicBuffer;
+ mBufferUseCount[item.mSlot] = 0;
}
releaseBuffer(item.mSlot, item.mFrameNumber,
@@ -1029,6 +1039,7 @@
for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
if ((slotMask & 0x01) != 0) {
mBufferSlot[i] = NULL;
+ mBufferUseCount[i] = 0;
}
slotMask >>= 1;
}
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index adc7afd..b3fcd1b 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -291,6 +291,7 @@
// is done processing a GraphicBuffer, we can use this to map back
// to a slot number.
sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+ int32_t mBufferUseCount[BufferQueue::NUM_BUFFER_SLOTS];
// Tracks codec buffers.
Vector<CodecBuffer> mCodecBuffers;
@@ -323,7 +324,6 @@
int mLatestBufferId;
uint64_t mLatestBufferFrameNum;
- int32_t mLatestBufferUseCount;
sp<Fence> mLatestBufferFence;
// The previous buffer should've been repeated but
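
The GraphicBufferSource change above replaces the single mLatestBufferUseCount with a per-slot use count: a slot is released back to the BufferQueue only when it is not the latest (repeatable) buffer and its count drops to zero. A standalone sketch of that accounting (slot indices and counts are illustrative; the real code also tracks fences and frame numbers):

    #include <cstdio>
    #include <vector>

    struct SlotTracker {
        std::vector<int> useCount;
        int latestSlot = -1;

        explicit SlotTracker(size_t slots) : useCount(slots, 0) {}

        void onBufferSubmitted(int slot) { ++useCount[slot]; }

        // Returns true if the slot can be released back to the BufferQueue.
        bool onCodecBufferEmptied(int slot) {
            if (--useCount[slot] < 0) {
                useCount[slot] = 0;  // defensive, mirrors the ALOGW path
            }
            return slot != latestSlot && useCount[slot] == 0;
        }
    };

    int main() {
        SlotTracker tracker(4);
        tracker.latestSlot = 1;
        tracker.onBufferSubmitted(1);
        tracker.onBufferSubmitted(1);  // repeated frame: same slot submitted twice
        printf("release? %d\n", tracker.onCodecBufferEmptied(1));  // 0: still in use
        printf("release? %d\n", tracker.onCodecBufferEmptied(1));  // 0: it is the latest
        tracker.latestSlot = 2;
        tracker.onBufferSubmitted(3);
        printf("release? %d\n", tracker.onCodecBufferEmptied(3));  // 1: not latest, count 0
        return 0;
    }
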
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 49f23be..f6e8d60 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -433,7 +433,7 @@
|| (index > (OMX_INDEXTYPE)OMX_IndexExtAudioStartUnused
&& index <= (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported)
|| (index > (OMX_INDEXTYPE)OMX_IndexExtVideoStartUnused
- && index <= (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh)
+ && index <= (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering)
|| (index > (OMX_INDEXTYPE)OMX_IndexExtOtherStartUnused
&& index <= (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits)) {
return false;
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index a391911..76cbbc4 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -431,7 +431,19 @@
}
void SimpleSoftOMXComponent::onChangeState(OMX_STATETYPE state) {
+ ALOGV("%p requesting change from %d to %d", this, mState, state);
// We shouldn't be in a state transition already.
+
+ if (mState == OMX_StateLoaded
+ && mTargetState == OMX_StateIdle
+ && state == OMX_StateLoaded) {
+ // OMX specifically allows "canceling" a state transition from loaded
+ // to idle. Pretend we made it to idle, and go back to loaded
+ ALOGV("load->idle canceled");
+ mState = mTargetState = OMX_StateIdle;
+ state = OMX_StateLoaded;
+ }
+
CHECK_EQ((int)mState, (int)mTargetState);
switch (mState) {
@@ -611,6 +623,7 @@
}
if (transitionComplete) {
+ ALOGV("state transition from %d to %d complete", mState, mTargetState);
mState = mTargetState;
if (mState == OMX_StateLoaded) {
@@ -618,6 +631,8 @@
}
notify(OMX_EventCmdComplete, OMX_CommandStateSet, mState, NULL);
+ } else {
+ ALOGV("state transition from %d to %d not yet complete", mState, mTargetState);
}
}
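
The loaded-to-idle cancellation above can be expressed as a plain function over a simplified state model. This is a standalone sketch under that simplification, not the OMX component itself; Component, requestState and the three-state enum are illustrative.

    #include <cassert>
    #include <cstdio>

    enum State { LOADED, IDLE, EXECUTING };

    struct Component {
        State state = LOADED;
        State target = LOADED;

        void requestState(State s) {
            // OMX allows "canceling" a pending loaded->idle transition by asking
            // for loaded again: treat the pending transition as done, then fall
            // through to a normal idle->loaded request.
            if (state == LOADED && target == IDLE && s == LOADED) {
                state = target = IDLE;
            }
            assert(state == target);  // no other transition may be in progress
            target = s;
        }
    };

    int main() {
        Component c;
        c.requestState(IDLE);    // loaded -> idle pending
        c.requestState(LOADED);  // canceled: now idle -> loaded pending
        printf("state=%d target=%d\n", c.state, c.target);
        return 0;
    }
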
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 42a1182..76e2e6e 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1408,6 +1408,11 @@
msg->post((mKeepAliveTimeoutUs * 9) / 10);
}
+ void cancelAccessUnitTimeoutCheck() {
+ ALOGV("cancelAccessUnitTimeoutCheck");
+ ++mCheckGeneration;
+ }
+
void postAccessUnitTimeoutCheck() {
if (mCheckPending) {
return;
@@ -1792,14 +1797,8 @@
// Time is now established, lets start timestamping immediately
for (size_t i = 0; i < mTracks.size(); ++i) {
- TrackInfo *trackInfo = &mTracks.editItemAt(i);
- while (!trackInfo->mPackets.empty()) {
- sp<ABuffer> accessUnit = *trackInfo->mPackets.begin();
- trackInfo->mPackets.erase(trackInfo->mPackets.begin());
-
- if (addMediaTimestamp(i, trackInfo, accessUnit)) {
- postQueueAccessUnit(i, accessUnit);
- }
+ if (OK != processAccessUnitQueue(i)) {
+ return;
}
}
for (size_t i = 0; i < mTracks.size(); ++i) {
@@ -1812,26 +1811,8 @@
}
}
- void onAccessUnitComplete(
- int32_t trackIndex, const sp<ABuffer> &accessUnit) {
- ALOGV("onAccessUnitComplete track %d", trackIndex);
-
+ status_t processAccessUnitQueue(int32_t trackIndex) {
TrackInfo *track = &mTracks.editItemAt(trackIndex);
- if(!mPlayResponseParsed){
- uint32_t seqNum = (uint32_t)accessUnit->int32Data();
- ALOGI("play response is not parsed, storing accessunit %u", seqNum);
- track->mPackets.push_back(accessUnit);
- return;
- }
-
- handleFirstAccessUnit();
-
- if (!mAllTracksHaveTime) {
- ALOGV("storing accessUnit, no time established yet");
- track->mPackets.push_back(accessUnit);
- return;
- }
-
while (!track->mPackets.empty()) {
sp<ABuffer> accessUnit = *track->mPackets.begin();
track->mPackets.erase(track->mPackets.begin());
@@ -1842,27 +1823,29 @@
// by ARTPSource. Only the low 16 bits of seq in RTP-Info of reply of
// RTSP "PLAY" command should be used to detect the first RTP packet
// after seeking.
- if (track->mAllowedStaleAccessUnits > 0) {
- uint32_t seqNum16 = seqNum & 0xffff;
- uint32_t firstSeqNumInSegment16 = track->mFirstSeqNumInSegment & 0xffff;
- if (seqNum16 > firstSeqNumInSegment16 + kMaxAllowedStaleAccessUnits
- || seqNum16 < firstSeqNumInSegment16) {
- // Not the first rtp packet of the stream after seeking, discarding.
- track->mAllowedStaleAccessUnits--;
- ALOGV("discarding stale access unit (0x%x : 0x%x)",
- seqNum, track->mFirstSeqNumInSegment);
- continue;
+ if (mSeekable) {
+ if (track->mAllowedStaleAccessUnits > 0) {
+ uint32_t seqNum16 = seqNum & 0xffff;
+ uint32_t firstSeqNumInSegment16 = track->mFirstSeqNumInSegment & 0xffff;
+ if (seqNum16 > firstSeqNumInSegment16 + kMaxAllowedStaleAccessUnits
+ || seqNum16 < firstSeqNumInSegment16) {
+ // Not the first rtp packet of the stream after seeking, discarding.
+ track->mAllowedStaleAccessUnits--;
+ ALOGV("discarding stale access unit (0x%x : 0x%x)",
+ seqNum, track->mFirstSeqNumInSegment);
+ continue;
+ }
+ ALOGW_IF(seqNum16 != firstSeqNumInSegment16,
+ "Missing the first packet(%u), now take packet(%u) as first one",
+ track->mFirstSeqNumInSegment, seqNum);
+ } else { // track->mAllowedStaleAccessUnits <= 0
+ mNumAccessUnitsReceived = 0;
+ ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
+ "Still no first rtp packet after %d stale ones",
+ kMaxAllowedStaleAccessUnits);
+ track->mAllowedStaleAccessUnits = -1;
+ return UNKNOWN_ERROR;
}
- ALOGW_IF(seqNum16 != firstSeqNumInSegment16,
- "Missing the first packet(%u), now take packet(%u) as first one",
- track->mFirstSeqNumInSegment, seqNum);
- } else { // track->mAllowedStaleAccessUnits <= 0
- mNumAccessUnitsReceived = 0;
- ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
- "Still no first rtp packet after %d stale ones",
- kMaxAllowedStaleAccessUnits);
- track->mAllowedStaleAccessUnits = -1;
- return;
}
// Now found the first rtp packet of the stream after seeking.
@@ -1876,14 +1859,35 @@
continue;
}
-
if (addMediaTimestamp(trackIndex, track, accessUnit)) {
postQueueAccessUnit(trackIndex, accessUnit);
}
}
+ return OK;
+ }
- if (addMediaTimestamp(trackIndex, track, accessUnit)) {
- postQueueAccessUnit(trackIndex, accessUnit);
+ void onAccessUnitComplete(
+ int32_t trackIndex, const sp<ABuffer> &accessUnit) {
+ TrackInfo *track = &mTracks.editItemAt(trackIndex);
+ track->mPackets.push_back(accessUnit);
+
+ uint32_t seqNum = (uint32_t)accessUnit->int32Data();
+ ALOGV("onAccessUnitComplete track %d storing accessunit %u", trackIndex, seqNum);
+
+ if(!mPlayResponseParsed){
+ ALOGV("play response is not parsed");
+ return;
+ }
+
+ handleFirstAccessUnit();
+
+ if (!mAllTracksHaveTime) {
+ ALOGV("storing accessUnit, no time established yet");
+ return;
+ }
+
+ if (OK != processAccessUnitQueue(trackIndex)) {
+ return;
}
if (track->mEOSReceived) {
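
The stale-access-unit filter above only compares the low 16 bits of the RTP sequence number against the first sequence number of the segment. A standalone sketch of that test (the limit of 20 is illustrative, standing in for kMaxAllowedStaleAccessUnits):

    #include <cstdint>
    #include <cstdio>

    static const int kMaxAllowedStaleAccessUnits = 20;  // illustrative limit

    // Returns true if the unit should be discarded as stale (pre-seek) data.
    static bool isStale(uint32_t seqNum, uint32_t firstSeqNumInSegment) {
        uint32_t seqNum16 = seqNum & 0xffff;
        uint32_t firstSeqNumInSegment16 = firstSeqNumInSegment & 0xffff;
        return seqNum16 > firstSeqNumInSegment16 + kMaxAllowedStaleAccessUnits
                || seqNum16 < firstSeqNumInSegment16;
    }

    int main() {
        printf("%d\n", isStale(0x10003, 0x0005));  // low 16 bits 3 < 5 -> stale
        printf("%d\n", isStale(0x0006, 0x0005));   // within window -> keep
        return 0;
    }
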
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 90f1a77..e39dcdd 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -927,6 +927,10 @@
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER; // sequence number
MtpStringBuffer name, created, modified;
if (!mData.getString(name)) return MTP_RESPONSE_INVALID_PARAMETER; // file name
+ if (name.getCharCount() == 0) {
+ ALOGE("empty name");
+ return MTP_RESPONSE_INVALID_PARAMETER;
+ }
if (!mData.getString(created)) return MTP_RESPONSE_INVALID_PARAMETER; // date created
if (!mData.getString(modified)) return MTP_RESPONSE_INVALID_PARAMETER; // date modified
// keywords follow
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 1118959..89f2d9c 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -148,7 +148,14 @@
EXPORT
bool AMediaExtractor_advance(AMediaExtractor *mData) {
//ALOGV("advance");
- return mData->mImpl->advance();
+ status_t err = mData->mImpl->advance();
+ if (err == ERROR_END_OF_STREAM) {
+ return false;
+ } else if (err != OK) {
+ ALOGE("sf error code: %d", err);
+ return false;
+ }
+ return true;
}
EXPORT
@@ -343,9 +350,9 @@
const void *key;
size_t keysize;
- if (meta->findData(kKeyCryptoIV, &type, &key, &keysize)) {
+ if (meta->findData(kKeyCryptoKey, &type, &key, &keysize)) {
if (keysize != 16) {
- // IVs must be 16 bytes in length.
+ // Keys must be 16 bytes in length.
return NULL;
}
}
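
The AMediaExtractor_advance change above maps the internal status to the NDK bool return, logging only unexpected errors. A standalone sketch of that mapping (the numeric error values are illustrative stand-ins for the stagefright status codes):

    #include <cstdio>

    enum { OK = 0, ERROR_END_OF_STREAM = -1011, ERROR_IO = -1004 };

    static bool advanceResultToBool(int err) {
        if (err == ERROR_END_OF_STREAM) {
            return false;  // normal end of stream, nothing logged
        } else if (err != OK) {
            fprintf(stderr, "sf error code: %d\n", err);  // mirrors the ALOGE
            return false;
        }
        return true;
    }

    int main() {
        printf("%d %d %d\n", advanceResultToBool(OK),
               advanceResultToBool(ERROR_END_OF_STREAM),
               advanceResultToBool(ERROR_IO));
        return 0;
    }
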
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 79f4a66..fec3a57 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1104,14 +1104,20 @@
// AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
Mutex::Autolock _l(mLock);
- status_t final_result = NO_ERROR;
+ // result will remain NO_INIT if no audio device is present
+ status_t final_result = NO_INIT;
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
status_t result = dev->set_parameters(dev, keyValuePairs.string());
- final_result = result ?: final_result;
+ // return success if at least one audio device accepts the parameters as not all
+ // HALs are required to support all parameters. If no audio device supports the
+ // requested parameters, the last error is reported.
+ if (final_result != NO_ERROR) {
+ final_result = result;
+ }
}
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -1353,7 +1359,7 @@
ALOGV("%d died, releasing its sessions", pid);
size_t num = mAudioSessionRefs.size();
bool removed = false;
- for (size_t i = 0; i< num; ) {
+ for (size_t i = 0; i < num; ) {
AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
ALOGV(" pid %d @ %zu", ref->mPid, i);
if (ref->mPid == pid) {
@@ -2351,7 +2357,7 @@
}
size_t num = mAudioSessionRefs.size();
- for (size_t i = 0; i< num; i++) {
+ for (size_t i = 0; i < num; i++) {
AudioSessionRef *ref = mAudioSessionRefs.editItemAt(i);
if (ref->mSessionid == audioSession && ref->mPid == caller) {
ref->mCnt++;
@@ -2372,7 +2378,7 @@
caller = pid;
}
size_t num = mAudioSessionRefs.size();
- for (size_t i = 0; i< num; i++) {
+ for (size_t i = 0; i < num; i++) {
AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
if (ref->mSessionid == audioSession && ref->mPid == caller) {
ref->mCnt--;
@@ -2390,6 +2396,18 @@
ALOGW_IF(caller != getpid_cached, "session id %d not found for pid %d", audioSession, caller);
}
+bool AudioFlinger::isSessionAcquired_l(audio_session_t audioSession)
+{
+ size_t num = mAudioSessionRefs.size();
+ for (size_t i = 0; i < num; i++) {
+ AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
+ if (ref->mSessionid == audioSession) {
+ return true;
+ }
+ }
+ return false;
+}
+
void AudioFlinger::purgeStaleEffects_l() {
ALOGV("purging stale effects");
@@ -2783,8 +2801,9 @@
sp<Client> client = registerPid(pid);
// create effect on selected output thread
+ bool pinned = (sessionId > AUDIO_SESSION_OUTPUT_MIX) && isSessionAcquired_l(sessionId);
handle = thread->createEffect_l(client, effectClient, priority, sessionId,
- &desc, enabled, &lStatus);
+ &desc, enabled, &lStatus, pinned);
if (handle != 0 && id != NULL) {
*id = handle->id();
}
@@ -2981,7 +3000,7 @@
ALOGV("updateOrphanEffectChains session %d index %zd", session, index);
if (index >= 0) {
sp<EffectChain> chain = mOrphanEffectChains.valueAt(index);
- if (chain->removeEffect_l(effect) == 0) {
+ if (chain->removeEffect_l(effect, true) == 0) {
ALOGV("updateOrphanEffectChains removing effect chain at index %zd", index);
mOrphanEffectChains.removeItemsAt(index);
}
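
The setParameters aggregation above starts from NO_INIT (no device present), latches the first success, and otherwise keeps the most recent error. A standalone sketch of that rule (the numeric status values are illustrative):

    #include <cstdio>
    #include <vector>

    enum { NO_ERROR = 0, NO_INIT = -19, BAD_VALUE = -22 };  // illustrative values

    static int aggregateSetParameters(const std::vector<int> &perDeviceResults) {
        int finalResult = NO_INIT;          // remains NO_INIT with zero devices
        for (int result : perDeviceResults) {
            if (finalResult != NO_ERROR) {  // first success wins and sticks
                finalResult = result;
            }
        }
        return finalResult;
    }

    int main() {
        printf("%d\n", aggregateSetParameters({}));                     // NO_INIT
        printf("%d\n", aggregateSetParameters({BAD_VALUE, NO_ERROR}));  // NO_ERROR
        printf("%d\n", aggregateSetParameters({BAD_VALUE, NO_INIT}));   // last error
        return 0;
    }
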
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c56dcc1..40c8a72 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -585,6 +585,7 @@
void removeNotificationClient(pid_t pid);
bool isNonOffloadableGlobalEffectEnabled_l();
void onNonOffloadableGlobalEffectEnable();
+ bool isSessionAcquired_l(audio_session_t audioSession);
// Store an effect chain to mOrphanEffectChains keyed vector.
// Called when a thread exits and effects are still attached to it.
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
index 2ca2cac..7b6dfcb 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/services/audioflinger/BufferProviders.cpp
@@ -474,18 +474,18 @@
ALOGV("processFrames(%zu %zu) remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
// Note dstFrames is the required number of frames.
- // Ensure consumption from src is as expected.
- //TODO: add logic to track "very accurate" consumption related to speed, original sampling
- //rate, actual frames processed.
- const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
- if (*srcFrames < targetSrc) { // limit dst frames to that possible
- *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
- } else if (*srcFrames > targetSrc + 1) {
- *srcFrames = targetSrc + 1;
- }
-
if (!mAudioPlaybackRateValid) {
//fallback mode
+ // Ensure consumption from src is as expected.
+ // TODO: add logic to track "very accurate" consumption related to speed, original sampling
+ // rate, actual frames processed.
+
+ const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
+ if (*srcFrames < targetSrc) { // limit dst frames to that possible
+ *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
+ } else if (*srcFrames > targetSrc + 1) {
+ *srcFrames = targetSrc + 1;
+ }
if (*dstFrames > 0) {
switch(mPlaybackRate.mFallbackMode) {
case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
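
The fallback-mode clamping moved above consumes roughly dstFrames * speed source frames and never more than are available. A standalone sketch of that adjustment (frame counts and speed are illustrative):

    #include <cstdio>

    static void clampFrames(size_t *dstFrames, size_t *srcFrames, float speed) {
        const size_t targetSrc = (size_t)(*dstFrames * speed);
        if (*srcFrames < targetSrc) {
            *dstFrames = (size_t)(*srcFrames / speed);  // limit dst to what src allows
        } else if (*srcFrames > targetSrc + 1) {
            *srcFrames = targetSrc + 1;                 // don't over-consume src
        }
    }

    int main() {
        size_t dst = 960, src = 1000;
        clampFrames(&dst, &src, 2.0f);  // needs 1920 src, only 1000 -> dst = 500
        printf("dst=%zu src=%zu\n", dst, src);

        dst = 960; src = 4000;
        clampFrames(&dst, &src, 2.0f);  // caps src at 1921
        printf("dst=%zu src=%zu\n", dst, src);
        return 0;
    }
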
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index a6bc286..b588685 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -59,8 +59,9 @@
const wp<AudioFlinger::EffectChain>& chain,
effect_descriptor_t *desc,
int id,
- audio_session_t sessionId)
- : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX),
+ audio_session_t sessionId,
+ bool pinned)
+ : mPinned(pinned),
mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
mDescriptor(*desc),
// mConfig is set by configure() and not used before then
@@ -71,7 +72,7 @@
mSuspended(false),
mAudioFlinger(thread->mAudioFlinger)
{
- ALOGV("Constructor %p", this);
+ ALOGV("Constructor %p pinned %d", this, pinned);
int lStatus;
// create effect engine from effect factory
@@ -86,6 +87,8 @@
goto Error;
}
+ setOffloaded(thread->type() == ThreadBase::OFFLOAD, thread->id());
+
ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
return;
Error:
@@ -98,9 +101,8 @@
{
ALOGV("Destructor %p", this);
if (mEffectInterface != NULL) {
- remove_effect_from_hal_l();
- // release effect engine
- EffectRelease(mEffectInterface);
+ ALOGW("EffectModule %p destructor called with unreleased interface", this);
+ release_l();
}
}
@@ -115,7 +117,7 @@
size_t i;
for (i = 0; i < size; i++) {
EffectHandle *h = mHandles[i];
- if (h == NULL || h->destroyed_l()) {
+ if (h == NULL || h->disconnected()) {
continue;
}
// first non destroyed handle is considered in control
@@ -143,9 +145,14 @@
return status;
}
-size_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle)
+ssize_t AudioFlinger::EffectModule::removeHandle(EffectHandle *handle)
{
Mutex::Autolock _l(mLock);
+ return removeHandle_l(handle);
+}
+
+ssize_t AudioFlinger::EffectModule::removeHandle_l(EffectHandle *handle)
+{
size_t size = mHandles.size();
size_t i;
for (i = 0; i < size; i++) {
@@ -154,9 +161,10 @@
}
}
if (i == size) {
- return size;
+ ALOGW("%s %p handle not found %p", __FUNCTION__, this, handle);
+ return BAD_VALUE;
}
- ALOGV("removeHandle() %p removed handle %p in position %zu", this, handle, i);
+ ALOGV("removeHandle_l() %p removed handle %p in position %zu", this, handle, i);
mHandles.removeAt(i);
// if removed from first place, move effect control from this handle to next in line
@@ -183,7 +191,7 @@
// the first valid handle in the list has control over the module
for (size_t i = 0; i < mHandles.size(); i++) {
EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
+ if (h != NULL && !h->disconnected()) {
return h;
}
}
@@ -191,29 +199,22 @@
return NULL;
}
-size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast)
+// unsafe method called when the effect's parent thread has been destroyed
+ssize_t AudioFlinger::EffectModule::disconnectHandle(EffectHandle *handle, bool unpinIfLast)
{
ALOGV("disconnect() %p handle %p", this, handle);
- // keep a strong reference on this EffectModule to avoid calling the
- // destructor before we exit
- sp<EffectModule> keep(this);
- {
- if (removeHandle(handle) == 0) {
- if (!isPinned() || unpinIfLast) {
- sp<ThreadBase> thread = mThread.promote();
- if (thread != 0) {
- Mutex::Autolock _l(thread->mLock);
- thread->removeEffect_l(this);
- }
- sp<AudioFlinger> af = mAudioFlinger.promote();
- if (af != 0) {
- af->updateOrphanEffectChains(this);
- }
- AudioSystem::unregisterEffect(mId);
- }
+ Mutex::Autolock _l(mLock);
+ ssize_t numHandles = removeHandle_l(handle);
+ if ((numHandles == 0) && (!mPinned || unpinIfLast)) {
+ AudioSystem::unregisterEffect(mId);
+ sp<AudioFlinger> af = mAudioFlinger.promote();
+ if (af != 0) {
+ mLock.unlock();
+ af->updateOrphanEffectChains(this);
+ mLock.lock();
}
}
- return mHandles.size();
+ return numHandles;
}
bool AudioFlinger::EffectModule::updateState() {
@@ -279,12 +280,29 @@
mConfig.inputCfg.buffer.s32,
mConfig.inputCfg.buffer.frameCount/2);
}
+ int ret;
+ if (isProcessImplemented()) {
+ // do the actual processing in the effect engine
+ ret = (*mEffectInterface)->process(mEffectInterface,
+ &mConfig.inputCfg.buffer,
+ &mConfig.outputCfg.buffer);
+ } else {
+ if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
+ size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2; //always stereo here
+ int16_t *in = mConfig.inputCfg.buffer.s16;
+ int16_t *out = mConfig.outputCfg.buffer.s16;
- // do the actual processing in the effect engine
- int ret = (*mEffectInterface)->process(mEffectInterface,
- &mConfig.inputCfg.buffer,
- &mConfig.outputCfg.buffer);
-
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < frameCnt; i++) {
+ out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]);
+ }
+ } else {
+ memcpy(mConfig.outputCfg.buffer.raw, mConfig.inputCfg.buffer.raw,
+ frameCnt * sizeof(int16_t));
+ }
+ }
+ ret = -ENODATA;
+ }
// force transition to IDLE state when engine is ready
if (mState == STOPPED && ret == -ENODATA) {
mDisableWaitCnt = 1;
@@ -301,7 +319,7 @@
// accumulate input onto output
sp<EffectChain> chain = mChain.promote();
if (chain != 0 && chain->activeTrackCnt() != 0) {
- size_t frameCnt = mConfig.inputCfg.buffer.frameCount * 2; //always stereo here
+ size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2; //always stereo here
int16_t *in = mConfig.inputCfg.buffer.s16;
int16_t *out = mConfig.outputCfg.buffer.s16;
for (size_t i = 0; i < frameCnt; i++) {
@@ -540,6 +558,17 @@
return status;
}
+// must be called with EffectChain::mLock held
+void AudioFlinger::EffectModule::release_l()
+{
+ if (mEffectInterface != NULL) {
+ remove_effect_from_hal_l();
+ // release effect engine
+ EffectRelease(mEffectInterface);
+ mEffectInterface = NULL;
+ }
+}
+
status_t AudioFlinger::EffectModule::remove_effect_from_hal_l()
{
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
@@ -632,7 +661,7 @@
uint32_t size = (replySize == NULL) ? 0 : *replySize;
for (size_t i = 1; i < mHandles.size(); i++) {
EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
+ if (h != NULL && !h->disconnected()) {
h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData);
}
}
@@ -685,7 +714,7 @@
}
for (size_t i = 1; i < mHandles.size(); i++) {
EffectHandle *h = mHandles[i];
- if (h != NULL && !h->destroyed_l()) {
+ if (h != NULL && !h->disconnected()) {
h->setEnabled(enabled);
}
}
@@ -849,8 +878,7 @@
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mHandles.size(); i++) {
EffectHandle *handle = mHandles[i];
- if (handle != NULL && !handle->destroyed_l()) {
- handle->effect().clear();
+ if (handle != NULL && !handle->disconnected()) {
if (handle->hasControl()) {
enabled = handle->enabled();
}
@@ -1077,7 +1105,7 @@
result.append("\t\t\t Pid Priority Ctrl Locked client server\n");
for (size_t i = 0; i < mHandles.size(); ++i) {
EffectHandle *handle = mHandles[i];
- if (handle != NULL && !handle->destroyed_l()) {
+ if (handle != NULL && !handle->disconnected()) {
handle->dumpToBuffer(buffer, SIZE);
result.append(buffer);
}
@@ -1103,7 +1131,7 @@
int32_t priority)
: BnEffect(),
mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
- mPriority(priority), mHasControl(false), mEnabled(false), mDestroyed(false)
+ mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false)
{
ALOGV("constructor %p", this);
@@ -1126,14 +1154,6 @@
AudioFlinger::EffectHandle::~EffectHandle()
{
ALOGV("Destructor %p", this);
-
- if (mEffect == 0) {
- mDestroyed = true;
- return;
- }
- mEffect->lock();
- mDestroyed = true;
- mEffect->unlock();
disconnect(false);
}
@@ -1144,13 +1164,15 @@
status_t AudioFlinger::EffectHandle::enable()
{
+ AutoMutex _l(mLock);
ALOGV("enable %p", this);
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect == 0 || mDisconnected) {
+ return DEAD_OBJECT;
+ }
if (!mHasControl) {
return INVALID_OPERATION;
}
- if (mEffect == 0) {
- return DEAD_OBJECT;
- }
if (mEnabled) {
return NO_ERROR;
@@ -1158,20 +1180,20 @@
mEnabled = true;
- sp<ThreadBase> thread = mEffect->thread().promote();
+ sp<ThreadBase> thread = effect->thread().promote();
if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, true, mEffect->sessionId());
+ thread->checkSuspendOnEffectEnabled(effect, true, effect->sessionId());
}
// checkSuspendOnEffectEnabled() can suspend this same effect when enabled
- if (mEffect->suspended()) {
+ if (effect->suspended()) {
return NO_ERROR;
}
- status_t status = mEffect->setEnabled(true);
+ status_t status = effect->setEnabled(true);
if (status != NO_ERROR) {
if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ thread->checkSuspendOnEffectEnabled(effect, false, effect->sessionId());
}
mEnabled = false;
} else {
@@ -1181,12 +1203,12 @@
Mutex::Autolock _l(t->mLock);
t->broadcast_l();
}
- if (!mEffect->isOffloadable()) {
+ if (!effect->isOffloadable()) {
if (thread->type() == ThreadBase::OFFLOAD) {
PlaybackThread *t = (PlaybackThread *)thread.get();
t->invalidateTracks(AUDIO_STREAM_MUSIC);
}
- if (mEffect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) {
+ if (effect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) {
thread->mAudioFlinger->onNonOffloadableGlobalEffectEnable();
}
}
@@ -1198,27 +1220,29 @@
status_t AudioFlinger::EffectHandle::disable()
{
ALOGV("disable %p", this);
+ AutoMutex _l(mLock);
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect == 0 || mDisconnected) {
+ return DEAD_OBJECT;
+ }
if (!mHasControl) {
return INVALID_OPERATION;
}
- if (mEffect == 0) {
- return DEAD_OBJECT;
- }
if (!mEnabled) {
return NO_ERROR;
}
mEnabled = false;
- if (mEffect->suspended()) {
+ if (effect->suspended()) {
return NO_ERROR;
}
- status_t status = mEffect->setEnabled(false);
+ status_t status = effect->setEnabled(false);
- sp<ThreadBase> thread = mEffect->thread().promote();
+ sp<ThreadBase> thread = effect->thread().promote();
if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ thread->checkSuspendOnEffectEnabled(effect, false, effect->sessionId());
if (thread->type() == ThreadBase::OFFLOAD) {
PlaybackThread *t = (PlaybackThread *)thread.get();
Mutex::Autolock _l(t->mLock);
@@ -1231,25 +1255,39 @@
void AudioFlinger::EffectHandle::disconnect()
{
+ ALOGV("%s %p", __FUNCTION__, this);
disconnect(true);
}
void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast)
{
- ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false");
- if (mEffect == 0) {
+ AutoMutex _l(mLock);
+ ALOGV("disconnect(%s) %p", unpinIfLast ? "true" : "false", this);
+ if (mDisconnected) {
+ if (unpinIfLast) {
+ android_errorWriteLog(0x534e4554, "32707507");
+ }
return;
}
- // restore suspended effects if the disconnected handle was enabled and the last one.
- if ((mEffect->disconnect(this, unpinIfLast) == 0) && mEnabled) {
- sp<ThreadBase> thread = mEffect->thread().promote();
- if (thread != 0) {
- thread->checkSuspendOnEffectEnabled(mEffect, false, mEffect->sessionId());
+ mDisconnected = true;
+ sp<ThreadBase> thread;
+ {
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect != 0) {
+ thread = effect->thread().promote();
+ }
+ }
+ if (thread != 0) {
+ thread->disconnectEffectHandle(this, unpinIfLast);
+ } else {
+ ALOGW("%s Effect handle %p disconnected after thread destruction", __FUNCTION__, this);
+ // try to cleanup as much as we can
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->disconnectHandle(this, unpinIfLast);
}
}
- // release sp on module => module destructor can be called now
- mEffect.clear();
if (mClient != 0) {
if (mCblk != NULL) {
// unlike ~TrackBase(), mCblk is never a local new, so don't delete
@@ -1269,26 +1307,70 @@
void *pReplyData)
{
ALOGVV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p",
- cmdCode, mHasControl, (mEffect == 0) ? 0 : mEffect.get());
+ cmdCode, mHasControl, mEffect.unsafe_get());
+ // reject commands reserved for internal use by audio framework if coming from outside
+ // of audioserver
+ switch(cmdCode) {
+ case EFFECT_CMD_ENABLE:
+ case EFFECT_CMD_DISABLE:
+ case EFFECT_CMD_SET_PARAM:
+ case EFFECT_CMD_SET_PARAM_DEFERRED:
+ case EFFECT_CMD_SET_PARAM_COMMIT:
+ case EFFECT_CMD_GET_PARAM:
+ break;
+ default:
+ if (cmdCode >= EFFECT_CMD_FIRST_PROPRIETARY) {
+ break;
+ }
+ android_errorWriteLog(0x534e4554, "62019992");
+ return BAD_VALUE;
+ }
+
+ if (cmdCode == EFFECT_CMD_ENABLE) {
+ if (*replySize < sizeof(int)) {
+ android_errorWriteLog(0x534e4554, "32095713");
+ return BAD_VALUE;
+ }
+ *(int *)pReplyData = NO_ERROR;
+ *replySize = sizeof(int);
+ return enable();
+ } else if (cmdCode == EFFECT_CMD_DISABLE) {
+ if (*replySize < sizeof(int)) {
+ android_errorWriteLog(0x534e4554, "32095713");
+ return BAD_VALUE;
+ }
+ *(int *)pReplyData = NO_ERROR;
+ *replySize = sizeof(int);
+ return disable();
+ }
+
+ AutoMutex _l(mLock);
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect == 0 || mDisconnected) {
+ return DEAD_OBJECT;
+ }
// only get parameter command is permitted for applications not controlling the effect
if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) {
return INVALID_OPERATION;
}
- if (mEffect == 0) {
- return DEAD_OBJECT;
- }
if (mClient == 0) {
return INVALID_OPERATION;
}
// handle commands that are not forwarded transparently to effect engine
if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) {
+ if (*replySize < sizeof(int)) {
+ android_errorWriteLog(0x534e4554, "32095713");
+ return BAD_VALUE;
+ }
+ *(int *)pReplyData = NO_ERROR;
+ *replySize = sizeof(int);
+
// No need to trylock() here as this function is executed in the binder thread serving a
// particular client process: no risk to block the whole media server process or mixer
// threads if we are stuck here
Mutex::Autolock _l(mCblk->lock);
-
// keep local copy of index in case of client corruption b/32220769
const uint32_t clientIndex = mCblk->clientIndex;
const uint32_t serverIndex = mCblk->serverIndex;
@@ -1322,7 +1404,7 @@
int reply = 0;
uint32_t rsize = sizeof(reply);
- status_t ret = mEffect->command(EFFECT_CMD_SET_PARAM,
+ status_t ret = effect->command(EFFECT_CMD_SET_PARAM,
size,
param,
&rsize,
@@ -1351,15 +1433,9 @@
mCblk->serverIndex = 0;
mCblk->clientIndex = 0;
return status;
- } else if (cmdCode == EFFECT_CMD_ENABLE) {
- *(int *)pReplyData = NO_ERROR;
- return enable();
- } else if (cmdCode == EFFECT_CMD_DISABLE) {
- *(int *)pReplyData = NO_ERROR;
- return disable();
}
- return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ return effect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
}
void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled)
@@ -1441,7 +1517,6 @@
if (mOwnInBuffer) {
delete mInBuffer;
}
-
}
// getEffectFromDesc_l() must be called with ThreadBase::mLock held
@@ -1557,13 +1632,38 @@
}
}
-// addEffect_l() must be called with PlaybackThread::mLock held
+// createEffect_l() must be called with ThreadBase::mLock held
+status_t AudioFlinger::EffectChain::createEffect_l(sp<EffectModule>& effect,
+ ThreadBase *thread,
+ effect_descriptor_t *desc,
+ int id,
+ audio_session_t sessionId,
+ bool pinned)
+{
+ Mutex::Autolock _l(mLock);
+ effect = new EffectModule(thread, this, desc, id, sessionId, pinned);
+ status_t lStatus = effect->status();
+ if (lStatus == NO_ERROR) {
+ lStatus = addEffect_ll(effect);
+ }
+ if (lStatus != NO_ERROR) {
+ effect.clear();
+ }
+ return lStatus;
+}
+
+// addEffect_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::EffectChain::addEffect_l(const sp<EffectModule>& effect)
{
+ Mutex::Autolock _l(mLock);
+ return addEffect_ll(effect);
+}
+// addEffect_ll() must be called with ThreadBase::mLock and EffectChain::mLock held
+status_t AudioFlinger::EffectChain::addEffect_ll(const sp<EffectModule>& effect)
+{
effect_descriptor_t desc = effect->desc();
uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
- Mutex::Autolock _l(mLock);
effect->setChain(this);
sp<ThreadBase> thread = mThread.promote();
if (thread == 0) {
@@ -1673,8 +1773,9 @@
return NO_ERROR;
}
-// removeEffect_l() must be called with PlaybackThread::mLock held
-size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect)
+// removeEffect_l() must be called with ThreadBase::mLock held
+size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect,
+ bool release)
{
Mutex::Autolock _l(mLock);
size_t size = mEffects.size();
@@ -1689,6 +1790,10 @@
mEffects[i]->state() == EffectModule::STOPPING) {
mEffects[i]->stop();
}
+ if (release) {
+ mEffects[i]->release_l();
+ }
+
if (type == EFFECT_FLAG_TYPE_AUXILIARY) {
delete[] effect->inBuffer();
} else {
@@ -1700,6 +1805,7 @@
mEffects.removeAt(i);
ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
+
break;
}
}
@@ -1707,7 +1813,7 @@
return mEffects.size();
}
-// setDevice_l() must be called with PlaybackThread::mLock held
+// setDevice_l() must be called with ThreadBase::mLock held
void AudioFlinger::EffectChain::setDevice_l(audio_devices_t device)
{
size_t size = mEffects.size();
@@ -1716,7 +1822,7 @@
}
}
-// setMode_l() must be called with PlaybackThread::mLock held
+// setMode_l() must be called with ThreadBase::mLock held
void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode)
{
size_t size = mEffects.size();
@@ -1725,7 +1831,7 @@
}
}
-// setAudioSource_l() must be called with PlaybackThread::mLock held
+// setAudioSource_l() must be called with ThreadBase::mLock held
void AudioFlinger::EffectChain::setAudioSource_l(audio_source_t source)
{
size_t size = mEffects.size();
@@ -1734,7 +1840,7 @@
}
}
-// setVolume_l() must be called with PlaybackThread::mLock or EffectChain::mLock held
+// setVolume_l() must be called with ThreadBase::mLock or EffectChain::mLock held
bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right, bool force)
{
uint32_t newLeft = *left;
@@ -1795,7 +1901,7 @@
return hasControl;
}
-// resetVolume_l() must be called with PlaybackThread::mLock or EffectChain::mLock held
+// resetVolume_l() must be called with ThreadBase::mLock or EffectChain::mLock held
void AudioFlinger::EffectChain::resetVolume_l()
{
if ((mLeftVolume != UINT_MAX) && (mRightVolume != UINT_MAX)) {
@@ -1896,7 +2002,7 @@
effect->setSuspended(false);
effect->lock();
EffectHandle *handle = effect->controlHandle_l();
- if (handle != NULL && !handle->destroyed_l()) {
+ if (handle != NULL && !handle->disconnected()) {
effect->setEnabled_l(handle->enabled());
}
effect->unlock();
@@ -2057,15 +2163,49 @@
}
}
-bool AudioFlinger::EffectChain::hasSoftwareEffect() const
+void AudioFlinger::EffectChain::checkOutputFlagCompatibility(audio_output_flags_t *flags) const
+{
+ if ((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0 && !isRawCompatible()) {
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
+ }
+ if ((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0 && !isFastCompatible()) {
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+ }
+}
+
+void AudioFlinger::EffectChain::checkInputFlagCompatibility(audio_input_flags_t *flags) const
+{
+ if ((*flags & AUDIO_INPUT_FLAG_RAW) != 0 && !isRawCompatible()) {
+ *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_RAW);
+ }
+ if ((*flags & AUDIO_INPUT_FLAG_FAST) != 0 && !isFastCompatible()) {
+ *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
+ }
+}
+
+bool AudioFlinger::EffectChain::isRawCompatible() const
{
Mutex::Autolock _l(mLock);
- for (size_t i = 0; i < mEffects.size(); i++) {
- if (mEffects[i]->isImplementationSoftware()) {
- return true;
+ for (const auto &effect : mEffects) {
+ if (effect->isProcessImplemented()) {
+ return false;
}
}
- return false;
+ // Allow effects without processing.
+ return true;
+}
+
+bool AudioFlinger::EffectChain::isFastCompatible() const
+{
+ Mutex::Autolock _l(mLock);
+ for (const auto &effect : mEffects) {
+ if (effect->isProcessImplemented()
+ && effect->isImplementationSoftware()) {
+ return false;
+ }
+ }
+ // Allow effects without processing or hw accelerated effects.
+ return true;
}
// isCompatibleWithThread_l() must be called with thread->mLock held
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 322c06a..864d508 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -45,7 +45,8 @@
const wp<AudioFlinger::EffectChain>& chain,
effect_descriptor_t *desc,
int id,
- audio_session_t sessionId);
+ audio_session_t sessionId,
+ bool pinned);
virtual ~EffectModule();
enum effect_state {
@@ -93,8 +94,9 @@
const wp<ThreadBase>& thread() { return mThread; }
status_t addHandle(EffectHandle *handle);
- size_t disconnect(EffectHandle *handle, bool unpinIfLast);
- size_t removeHandle(EffectHandle *handle);
+ ssize_t disconnectHandle(EffectHandle *handle, bool unpinIfLast);
+ ssize_t removeHandle(EffectHandle *handle);
+ ssize_t removeHandle_l(EffectHandle *handle);
const effect_descriptor_t& desc() const { return mDescriptor; }
wp<EffectChain>& chain() { return mChain; }
@@ -119,9 +121,12 @@
{ return (mDescriptor.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0; }
bool isImplementationSoftware() const
{ return (mDescriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0; }
+ bool isProcessImplemented() const
+ { return (mDescriptor.flags & EFFECT_FLAG_NO_PROCESS) == 0; }
status_t setOffloaded(bool offloaded, audio_io_handle_t io);
bool isOffloaded() const;
void addEffectToHal_l();
+ void release_l();
void dump(int fd, const Vector<String16>& args);
@@ -206,12 +211,17 @@
bool enabled() const { return mEnabled; }
// Getters
- int id() const { return mEffect->id(); }
+ wp<EffectModule> effect() const { return mEffect; }
+ int id() const {
+ sp<EffectModule> effect = mEffect.promote();
+ if (effect == 0) {
+ return 0;
+ }
+ return effect->id();
+ }
int priority() const { return mPriority; }
bool hasControl() const { return mHasControl; }
- sp<EffectModule> effect() const { return mEffect; }
- // destroyed_l() must be called with the associated EffectModule mLock held
- bool destroyed_l() const { return mDestroyed; }
+ bool disconnected() const { return mDisconnected; }
void dumpToBuffer(char* buffer, size_t size);
@@ -220,7 +230,8 @@
EffectHandle(const EffectHandle&);
EffectHandle& operator =(const EffectHandle&);
- sp<EffectModule> mEffect; // pointer to controlled EffectModule
+ Mutex mLock; // protects IEffect method calls
+ wp<EffectModule> mEffect; // pointer to controlled EffectModule
sp<IEffectClient> mEffectClient; // callback interface for client notifications
/*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect()
sp<IMemory> mCblkMemory; // shared memory for control block
@@ -231,8 +242,7 @@
bool mHasControl; // true if this handle is controlling the effect
bool mEnabled; // cached enable state: needed when the effect is
// restored after being suspended
- bool mDestroyed; // Set to true by destructor. Access with EffectModule
- // mLock held
+ bool mDisconnected; // Set to true by disconnect()
};
// the EffectChain class represents a group of effects associated to one audio session.
@@ -267,8 +277,15 @@
mLock.unlock();
}
+ status_t createEffect_l(sp<EffectModule>& effect,
+ ThreadBase *thread,
+ effect_descriptor_t *desc,
+ int id,
+ audio_session_t sessionId,
+ bool pinned);
status_t addEffect_l(const sp<EffectModule>& handle);
- size_t removeEffect_l(const sp<EffectModule>& handle);
+ status_t addEffect_ll(const sp<EffectModule>& handle);
+ size_t removeEffect_l(const sp<EffectModule>& handle, bool release = false);
audio_session_t sessionId() const { return mSessionId; }
void setSessionId(audio_session_t sessionId) { mSessionId = sessionId; }
@@ -326,7 +343,17 @@
void syncHalEffectsState();
- bool hasSoftwareEffect() const;
+ // flags is an ORed set of audio_output_flags_t which is updated on return.
+ void checkOutputFlagCompatibility(audio_output_flags_t *flags) const;
+
+ // flags is an ORed set of audio_input_flags_t which is updated on return.
+ void checkInputFlagCompatibility(audio_input_flags_t *flags) const;
+
+ // Is this EffectChain compatible with the RAW audio flag.
+ bool isRawCompatible() const;
+
+ // Is this EffectChain compatible with the FAST audio flag.
+ bool isFastCompatible() const;
// isCompatibleWithThread_l() must be called with thread->mLock held
bool isCompatibleWithThread_l(const sp<ThreadBase>& thread) const;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index b0780a4..93f7ce5 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -412,6 +412,7 @@
}
ftDump->mUnderruns = underruns;
ftDump->mFramesReady = framesReady;
+ ftDump->mFramesWritten = trackFramesWritten;
}
if (anyEnabledTracks) {
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 2326e2a..6475f22 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -168,7 +168,7 @@
uint32_t trackMask = mTrackMask;
dprintf(fd, " Fast tracks: sMaxFastTracks=%u activeMask=%#x\n",
FastMixerState::sMaxFastTracks, trackMask);
- dprintf(fd, " Index Active Full Partial Empty Recent Ready\n");
+ dprintf(fd, " Index Active Full Partial Empty Recent Ready Written\n");
for (uint32_t i = 0; i < FastMixerState::sMaxFastTracks; ++i, trackMask >>= 1) {
bool isActive = trackMask & 1;
const FastTrackDump *ftDump = &mTracks[i];
@@ -188,11 +188,13 @@
mostRecent = "?";
break;
}
- dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
+ dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu %10lld\n",
+ i, isActive ? "yes" : "no",
(underruns.mBitFields.mFull) & UNDERRUN_MASK,
(underruns.mBitFields.mPartial) & UNDERRUN_MASK,
(underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
- mostRecent, ftDump->mFramesReady);
+ mostRecent, ftDump->mFramesReady,
+ (long long)ftDump->mFramesWritten);
}
}
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index ac15e7c..301c5b1 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -57,6 +57,7 @@
/*virtual*/ ~FastTrackDump() { }
FastTrackUnderruns mUnderruns;
size_t mFramesReady; // most recent value only; no long-term statistics kept
+ int64_t mFramesWritten; // last value from track
};
struct FastMixerDumpState : FastThreadDumpState {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a671128..7423ea9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1272,6 +1272,12 @@
desc->name, mThreadName);
return BAD_VALUE;
}
+
+ // always allow effects without processing load or latency
+ if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
+ return NO_ERROR;
+ }
+
audio_input_flags_t flags = mInput->flags;
if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
if (flags & AUDIO_INPUT_FLAG_RAW) {
@@ -1328,6 +1334,11 @@
break;
}
}
+
+ // always allow effects without processing load or latency
+ if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
+ break;
+ }
if (flags & AUDIO_OUTPUT_FLAG_RAW) {
ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
desc->name);
@@ -1391,7 +1402,8 @@
audio_session_t sessionId,
effect_descriptor_t *desc,
int *enabled,
- status_t *status)
+ status_t *status,
+ bool pinned)
{
sp<EffectModule> effect;
sp<EffectHandle> handle;
@@ -1441,14 +1453,7 @@
}
effectRegistered = true;
// create a new effect module if none present in the chain
- effect = new EffectModule(this, chain, desc, id, sessionId);
- lStatus = effect->status();
- if (lStatus != NO_ERROR) {
- goto Exit;
- }
- effect->setOffloaded(mType == OFFLOAD, mId);
-
- lStatus = chain->addEffect_l(effect);
+ lStatus = chain->createEffect_l(effect, this, desc, id, sessionId, pinned);
if (lStatus != NO_ERROR) {
goto Exit;
}
@@ -1489,6 +1494,33 @@
return handle;
}
+void AudioFlinger::ThreadBase::disconnectEffectHandle(EffectHandle *handle,
+ bool unpinIfLast)
+{
+ bool remove = false;
+ sp<EffectModule> effect;
+ {
+ Mutex::Autolock _l(mLock);
+
+ effect = handle->effect().promote();
+ if (effect == 0) {
+ return;
+ }
+ // restore suspended effects if the disconnected handle was enabled and the last one.
+ remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
+ if (remove) {
+ removeEffect_l(effect, true);
+ }
+ }
+ if (remove) {
+ mAudioFlinger->updateOrphanEffectChains(effect);
+ AudioSystem::unregisterEffect(effect->id());
+ if (handle->enabled()) {
+ checkSuspendOnEffectEnabled(effect, false, effect->sessionId());
+ }
+ }
+}
+
sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(audio_session_t sessionId,
int effectId)
{
@@ -1549,9 +1581,9 @@
return NO_ERROR;
}
-void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
+void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect, bool release) {
- ALOGV("removeEffect_l() %p effect %p", this, effect.get());
+ ALOGV("%s %p effect %p", __FUNCTION__, this, effect.get());
effect_descriptor_t desc = effect->desc();
if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
detachAuxEffect_l(effect->id());
@@ -1560,7 +1592,7 @@
sp<EffectChain> chain = effect->chain().promote();
if (chain != 0) {
// remove effect chain if removing last effect
- if (chain->removeEffect_l(effect) == 0) {
+ if (chain->removeEffect_l(effect, release) == 0) {
removeEffectChain_l(chain);
}
} else {
@@ -1815,6 +1847,15 @@
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
String8 flagsAsString = outputFlagsToString(flags);
dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
+ dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
+ dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
+ if (mPipeSink.get() != nullptr) {
+ dprintf(fd, " PipeSink frames written: %lld\n", (long long)mPipeSink->framesWritten());
+ }
+ if (output != nullptr) {
+ dprintf(fd, " Hal stream dump:\n");
+ (void)output->stream->common.dump(&output->stream->common, fd);
+ }
}
// Thread virtuals
@@ -1898,34 +1939,19 @@
// check compatibility with audio effects.
{ // scope for mLock
Mutex::Autolock _l(mLock);
- // do not accept RAW flag if post processing are present. Note that post processing on
- // a fast mixer are necessarily hardware
- sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
- if (chain != 0) {
- ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
- "AUDIO_OUTPUT_FLAG_RAW denied: post processing effect present");
- *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
- }
- // Do not accept FAST flag if software global effects are present
- chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
- if (chain != 0) {
- ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
- "AUDIO_OUTPUT_FLAG_RAW denied: global effect present");
- *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
- if (chain->hasSoftwareEffect()) {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software global effect present");
- *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
- }
- }
- // Do not accept FAST flag if the session has software effects
- chain = getEffectChain_l(sessionId);
- if (chain != 0) {
- ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_RAW) != 0,
- "AUDIO_OUTPUT_FLAG_RAW denied: effect present on session");
- *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_RAW);
- if (chain->hasSoftwareEffect()) {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: software effect present on session");
- *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
+ for (audio_session_t session : {
+ AUDIO_SESSION_OUTPUT_STAGE,
+ AUDIO_SESSION_OUTPUT_MIX,
+ sessionId,
+ }) {
+ sp<EffectChain> chain = getEffectChain_l(session);
+ if (chain.get() != nullptr) {
+ audio_output_flags_t old = *flags;
+ chain->checkOutputFlagCompatibility(flags);
+ if (old != *flags) {
+ ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
+ (int)session, (int)old, (int)*flags);
+ }
}
}
}
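The rewritten block folds the three per-session checks into one loop and delegates the flag stripping to the chain. A simplified sketch of that loop, assuming a toy EffectChain whose checkOutputFlagCompatibility() always clears RAW and clears FAST when a software effect is present (the real rules live in EffectChain):

// Sketch of the flag-compatibility loop over a set of candidate chains.
#include <cstdint>
#include <cstdio>

enum : uint32_t { FLAG_FAST = 1u << 0, FLAG_RAW = 1u << 1 };

struct EffectChain {
    bool hasSoftwareEffect;
    void checkOutputFlagCompatibility(uint32_t* flags) const {
        *flags &= ~FLAG_RAW;                         // post processing defeats RAW
        if (hasSoftwareEffect) *flags &= ~FLAG_FAST; // software effects defeat FAST
    }
};

int main() {
    uint32_t flags = FLAG_FAST | FLAG_RAW;
    EffectChain chains[] = {{false}, {true}};        // e.g. output-stage chain, session chain
    for (const EffectChain& chain : chains) {
        uint32_t old = flags;
        chain.checkOutputFlagCompatibility(&flags);
        if (old != flags) std::printf("flags %#x -> %#x\n", old, flags);
    }
    return 0;
}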
@@ -3106,9 +3132,9 @@
if (!keepWakeLock()) {
releaseWakeLock_l();
released = true;
+ mWakeLockUids.clear();
+ mActiveTracksGeneration++;
}
- mWakeLockUids.clear();
- mActiveTracksGeneration++;
ALOGV("wait async completion");
mWaitWorkCV.wait(mLock);
ALOGV("async completion/wake");
@@ -4577,10 +4603,25 @@
return mixerStatus;
}
+// trackCountForUid_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::PlaybackThread::trackCountForUid_l(uid_t uid)
+{
+ uint32_t trackCount = 0;
+ for (size_t i = 0; i < mTracks.size() ; i++) {
+ if (mTracks[i]->uid() == (int)uid) {
+ trackCount++;
+ }
+ }
+ return trackCount;
+}
+
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, audio_session_t sessionId)
+ audio_format_t format, audio_session_t sessionId, uid_t uid)
{
+ if (trackCountForUid_l(uid) > (PlaybackThread::kMaxTracksPerUid - 1)) {
+ return -1;
+ }
return mAudioMixer->getTrackName(channelMask, format, sessionId);
}
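trackCountForUid_l() plus the new uid argument to getTrackName_l() implement a per-UID cap of kMaxTracksPerUid tracks. A standalone sketch of that check, using a plain vector instead of the real Track objects:

// Sketch of the per-UID track cap applied before allocating a mixer track name.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t kMaxTracksPerUid = 14;

struct Track { uint32_t uid; };

uint32_t trackCountForUid(const std::vector<Track>& tracks, uint32_t uid) {
    uint32_t count = 0;
    for (const Track& t : tracks) {
        if (t.uid == uid) count++;
    }
    return count;
}

// Returns a non-negative track name on success, -1 when the uid already owns
// kMaxTracksPerUid tracks (the same early-out the patched getTrackName_l does).
int getTrackName(const std::vector<Track>& tracks, uint32_t uid, int nextName) {
    if (trackCountForUid(tracks, uid) > kMaxTracksPerUid - 1) {
        return -1;
    }
    return nextName;
}

int main() {
    std::vector<Track> tracks(14, Track{1000});
    std::printf("%d\n", getTrackName(tracks, 1000, 14)); // -1: uid 1000 is at the cap
    std::printf("%d\n", getTrackName(tracks, 1001, 14)); // 14: other uids unaffected
    return 0;
}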
@@ -4685,7 +4726,7 @@
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
for (size_t i = 0; i < mTracks.size() ; i++) {
int name = getTrackName_l(mTracks[i]->mChannelMask,
- mTracks[i]->mFormat, mTracks[i]->mSessionId);
+ mTracks[i]->mFormat, mTracks[i]->mSessionId, mTracks[i]->uid());
if (name < 0) {
break;
}
@@ -5130,8 +5171,11 @@
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused,
- audio_format_t format __unused, audio_session_t sessionId __unused)
+ audio_format_t format __unused, audio_session_t sessionId __unused, uid_t uid)
{
+ if (trackCountForUid_l(uid) > (PlaybackThread::kMaxTracksPerUid - 1)) {
+ return -1;
+ }
return 0;
}
@@ -6556,12 +6600,11 @@
// Do not accept FAST flag if the session has software effects
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
- ALOGV_IF((*flags & AUDIO_INPUT_FLAG_RAW) != 0,
- "AUDIO_INPUT_FLAG_RAW denied: effect present on session");
- *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_RAW);
- if (chain->hasSoftwareEffect()) {
- ALOGV("AUDIO_INPUT_FLAG_FAST denied: software effect present on session");
- *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
+ audio_input_flags_t old = *flags;
+ chain->checkInputFlagCompatibility(flags);
+ if (old != *flags) {
+ ALOGV("AUDIO_INPUT_FLAGS denied by effect old=%#x new=%#x",
+ (int)old, (int)*flags);
}
}
ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1d5d3c8..ebeabb5 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -295,7 +295,8 @@
audio_session_t sessionId,
effect_descriptor_t *desc,
int *enabled,
- status_t *status /*non-NULL*/);
+ status_t *status /*non-NULL*/,
+ bool pinned);
// return values for hasAudioSession (bit field)
enum effect_state {
@@ -334,7 +335,9 @@
status_t addEffect_l(const sp< EffectModule>& effect);
// remove an effect module. Also removes the effect chain if this was the last
// effect
- void removeEffect_l(const sp< EffectModule>& effect);
+ void removeEffect_l(const sp< EffectModule>& effect, bool release = false);
+ // disconnect an effect handle from module and destroy module if last handle
+ void disconnectEffectHandle(EffectHandle *handle, bool unpinIfLast);
// detach all tracks connected to an auxiliary effect
virtual void detachAuxEffect_l(int effectId __unused) {}
// returns a combination of:
@@ -506,6 +509,8 @@
static const int8_t kMaxTrackRetriesOffload = 20;
static const int8_t kMaxTrackStartupRetriesOffload = 100;
static const int8_t kMaxTrackStopRetriesOffload = 2;
+ // 14 tracks max per client allows for 2 misbehaving applications, leaving 4 available tracks.
+ static const uint32_t kMaxTracksPerUid = 14;
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
@@ -736,8 +741,8 @@
// Allocate a track name for a given channel mask.
// Returns name >= 0 if successful, -1 on failure.
- virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, audio_session_t sessionId) = 0;
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
+ audio_session_t sessionId, uid_t uid) = 0;
virtual void deleteTrackName_l(int name) = 0;
// Time to sleep between cycles when:
@@ -767,6 +772,8 @@
&& mHwSupportsPause
&& (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
+ uint32_t trackCountForUid_l(uid_t uid);
+
private:
friend class AudioFlinger; // for numerous
@@ -889,8 +896,8 @@
protected:
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
- virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, audio_session_t sessionId);
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
+ audio_session_t sessionId, uid_t uid);
virtual void deleteTrackName_l(int name);
virtual uint32_t idleSleepTimeUs() const;
virtual uint32_t suspendSleepTimeUs() const;
@@ -969,8 +976,8 @@
virtual void flushHw_l();
protected:
- virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, audio_session_t sessionId);
+ virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
+ audio_session_t sessionId, uid_t uid);
virtual void deleteTrackName_l(int name);
virtual uint32_t activeSleepTimeUs() const;
virtual uint32_t idleSleepTimeUs() const;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 72b2252..a3f3ea5 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -419,7 +419,7 @@
}
mServerProxy = mAudioTrackServerProxy;
- mName = thread->getTrackName_l(channelMask, format, sessionId);
+ mName = thread->getTrackName_l(channelMask, format, sessionId, uid);
if (mName < 0) {
ALOGE("no more track names available");
return;
@@ -808,6 +808,13 @@
Mutex::Autolock _l(thread->mLock);
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ // Flush the ring buffer now if the track is not active in the PlaybackThread.
+ // Otherwise the flush would not be done until the track is resumed.
+ // Requires that FastTrack removal be BLOCK_UNTIL_ACKED
+ if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+ (void)mServerProxy->flushBufferIfNeeded();
+ }
+
if (isOffloaded()) {
// If offloaded we allow flush during any state except terminated
// and keep the track active to avoid problems if user is seeking
@@ -859,6 +866,10 @@
if (!isOffloaded() && !isDirect())
return;
+ // Clear the client ring buffer so that the app can prime the buffer while paused.
+ // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
+ mServerProxy->flushBufferIfNeeded();
+
mFlushHwPending = false;
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 46309ed..f2b39f2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -93,7 +93,9 @@
sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
- uint32_t activeInputsCount() const;
+ // count active capture sessions using one of the specified devices.
+ // ignore devices if AUDIO_DEVICE_IN_DEFAULT is passed
+ uint32_t activeInputsCountOnDevices(audio_devices_t devices = AUDIO_DEVICE_IN_DEFAULT) const;
/**
* return io handle of active input or 0 if no input is active
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 6dacaa4..c7d2ee4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -222,12 +222,14 @@
return inputDesc;
}
-uint32_t AudioInputCollection::activeInputsCount() const
+uint32_t AudioInputCollection::activeInputsCountOnDevices(audio_devices_t devices) const
{
uint32_t count = 0;
for (size_t i = 0; i < size(); i++) {
const sp<AudioInputDescriptor> inputDescriptor = valueAt(i);
- if (inputDescriptor->isActive()) {
+ if (inputDescriptor->isActive() &&
+ ((devices == AUDIO_DEVICE_IN_DEFAULT) ||
+ ((inputDescriptor->mDevice & devices & ~AUDIO_DEVICE_BIT_IN) != 0))) {
count++;
}
}
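activeInputsCountOnDevices() counts only the active inputs whose device intersects the requested mask, ignoring the AUDIO_DEVICE_BIT_IN direction bit. A sketch of that filter; the device constants below are illustrative stand-ins, not the real audio_devices_t definitions:

// Sketch of the device-mask filter over a list of input descriptors.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t AUDIO_DEVICE_BIT_IN     = 0x80000000u;
static const uint32_t AUDIO_DEVICE_IN_DEFAULT = 0xC0000000u; // stand-in value

struct InputDesc { bool active; uint32_t device; };

uint32_t activeInputsCountOnDevices(const std::vector<InputDesc>& inputs, uint32_t devices) {
    uint32_t count = 0;
    for (const InputDesc& in : inputs) {
        if (in.active &&
            ((devices == AUDIO_DEVICE_IN_DEFAULT) ||
             ((in.device & devices & ~AUDIO_DEVICE_BIT_IN) != 0))) {
            count++;
        }
    }
    return count;
}

int main() {
    const uint32_t MIC    = AUDIO_DEVICE_BIT_IN | 0x4;   // built-in mic (illustrative)
    const uint32_t REMOTE = AUDIO_DEVICE_BIT_IN | 0x100; // remote submix (illustrative)
    std::vector<InputDesc> inputs = {{true, MIC}, {true, REMOTE}, {false, MIC}};
    std::printf("%u\n", activeInputsCountOnDevices(inputs, MIC));                     // 1
    std::printf("%u\n", activeInputsCountOnDevices(inputs, AUDIO_DEVICE_IN_DEFAULT)); // 2
    return 0;
}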
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index b752541..968b80f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1315,16 +1315,23 @@
// force restoring the device selection on other active outputs if it differs from the
// one being selected for this output
+ uint32_t delayMs = outputDesc->latency()*2;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc &&
desc->isActive() &&
outputDesc->sharesHwModuleWith(desc) &&
(newDevice != desc->device())) {
+ audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
+ bool force = desc->device() != newDevice2;
setOutputDevice(desc,
- getNewOutputDevice(desc, false /*fromCache*/),
- true,
- outputDesc->latency()*2);
+ newDevice2,
+ force,
+ delayMs);
+ // re-apply device specific volume if not done by setOutputDevice()
+ if (!force) {
+ applyStreamVolumes(desc, newDevice2, delayMs);
+ }
}
}
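The loop now re-evaluates the device per output and only forces the routing change when the device actually differs; when it does not, the stream volumes are re-applied explicitly. A sketch of that decision, with setOutputDevice()/applyStreamVolumes() reduced to logging placeholders (the names mirror the AudioPolicyManager methods, the bodies do not):

// Sketch of the restore step for another active output on the same HW module.
#include <cstdint>
#include <cstdio>

void setOutputDevice(uint32_t device, bool force, uint32_t delayMs) {
    std::printf("setOutputDevice(%#x, force=%d, %u ms)\n", device, force, delayMs);
}

void applyStreamVolumes(uint32_t device, uint32_t delayMs) {
    std::printf("applyStreamVolumes(%#x, %u ms)\n", device, delayMs);
}

void restoreOutput(uint32_t currentDevice, uint32_t newDevice, uint32_t delayMs) {
    bool force = (currentDevice != newDevice);
    setOutputDevice(newDevice, force, delayMs);
    // When the device did not change, setOutputDevice() skips the volume
    // re-application, so it is done explicitly here.
    if (!force) {
        applyStreamVolumes(newDevice, delayMs);
    }
}

int main() {
    restoreOutput(0x2 /* current */, 0x4 /* changed */, 20);
    restoreOutput(0x2 /* current */, 0x2 /* unchanged */, 20);
    return 0;
}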
// update the outputs if stopping one with a stream that can affect notification routing
@@ -1681,10 +1688,15 @@
MIX_STATE_MIXING);
}
- if (mInputs.activeInputsCount() == 0) {
+ // indicate active capture to sound trigger service if starting capture from a mic on
+ // the primary HW module
+ audio_devices_t device = getNewInputDevice(input);
+ audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+ if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
SoundTrigger::setCaptureState(true);
}
- setInputDevice(input, getNewInputDevice(input), true /* force */);
+ setInputDevice(input, device, true /* force */);
// automatically enable the remote submix output when input is started if not
// used by a policy mix of type MIX_TYPE_RECORDERS
@@ -1761,9 +1773,14 @@
}
}
+ audio_devices_t device = inputDesc->mDevice;
resetInputDevice(input);
- if (mInputs.activeInputsCount() == 0) {
+ // indicate inactive capture to sound trigger service if stopping capture from a mic on
+ // the primary HW module
+ audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+ if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
SoundTrigger::setCaptureState(false);
}
inputDesc->clearPreemptedSessions();
@@ -1901,19 +1918,21 @@
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, true /*fromCache*/);
- if ((curStreamDevice & device) == 0) {
+ audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, false /*fromCache*/);
+ if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
+ ((curStreamDevice & device) == 0)) {
continue;
}
- bool applyDefault = false;
+ bool applyVolume;
if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
curStreamDevice |= device;
- } else if (!mVolumeCurves->hasVolumeIndexForDevice(
- stream, Volume::getDeviceForVolume(curStreamDevice))) {
- applyDefault = true;
+ applyVolume = (curDevice & curStreamDevice) != 0;
+ } else {
+ applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
+ stream, Volume::getDeviceForVolume(curStreamDevice));
}
- if (applyDefault || ((curDevice & curStreamDevice) != 0)) {
+ if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
@@ -4937,6 +4956,18 @@
audio_devices_t device)
{
float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
+
+ // handle the case of accessibility active while a ringtone is playing: if the ringtone is much
+ // louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
+ // exploration of the dialer UI. In this situation, bring the accessibility volume closer to
+ // the ringtone volume
+ if ((stream == AUDIO_STREAM_ACCESSIBILITY)
+ && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState())
+ && isStreamActive(AUDIO_STREAM_RING, 0)) {
+ const float ringVolumeDB = computeVolume(AUDIO_STREAM_RING, index, device);
+ return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
+ }
+
// if a headset is connected, apply the following rules to ring tones and notifications
// to avoid sound level bursts in user's ears:
// - always attenuate notifications volume by 6dB
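The new clamp keeps the accessibility prompt within 4 dB of the ring volume whenever the ring is louder than that. A small worked example of the arithmetic, outside of any AudioPolicyManager context:

// Worked example of the accessibility-vs-ringtone clamp.
#include <cstdio>

float clampAccessibilityVolume(float accessibilityDB, float ringDB) {
    return (ringDB - 4 > accessibilityDB) ? ringDB - 4 : accessibilityDB;
}

int main() {
    // Ring at -6 dB, prompt at -30 dB: the prompt is raised to -10 dB.
    std::printf("%.1f\n", clampAccessibilityVolume(-30.0f, -6.0f));
    // Ring at -40 dB, prompt at -30 dB: the prompt is left alone.
    std::printf("%.1f\n", clampAccessibilityVolume(-30.0f, -40.0f));
    return 0;
}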
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 475304b..a41ed95 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -385,7 +385,6 @@
sp<AudioPolicyEffects>audioPolicyEffects;
{
Mutex::Autolock _l(mLock);
- mAudioPolicyManager->releaseInput(input, session);
audioPolicyEffects = mAudioPolicyEffects;
}
if (audioPolicyEffects != 0) {
@@ -395,6 +394,10 @@
ALOGW("Failed to release effects on input %d", input);
}
}
+ {
+ Mutex::Autolock _l(mLock);
+ mAudioPolicyManager->releaseInput(input, session);
+ }
}
status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 9d5f33c..9a7839b 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -238,6 +238,10 @@
{
String8 supportedPreviewFpsRange;
for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+ if (!isFpsSupported(availablePreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
+ continue;
+ }
if (i != 0) supportedPreviewFpsRange += ",";
supportedPreviewFpsRange += String8::format("(%d,%d)",
availableFpsRanges.data.i32[i] * kFpsToApiScale,
@@ -255,7 +259,10 @@
// from the [min, max] fps range use the max value
int fps = fpsFromRange(availableFpsRanges.data.i32[i],
availableFpsRanges.data.i32[i+1]);
-
+ if (!isFpsSupported(availablePreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, fps)) {
+ continue;
+ }
// de-dupe frame rates
if (sortedPreviewFrameRates.indexOf(fps) == NAME_NOT_FOUND) {
sortedPreviewFrameRates.add(fps);
@@ -951,21 +958,40 @@
return NO_INIT;
}
+ // Get supported preview fps ranges.
+ Vector<Size> supportedPreviewSizes;
+ Vector<FpsRange> supportedPreviewFpsRanges;
+ const Size PREVIEW_SIZE_BOUND = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
+ status_t res = getFilteredSizes(PREVIEW_SIZE_BOUND, &supportedPreviewSizes);
+ if (res != OK) return res;
+ for (size_t i=0; i < availableFpsRanges.count; i += 2) {
+ if (!isFpsSupported(supportedPreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
+ continue;
+ }
+ FpsRange fpsRange = {availableFpsRanges.data.i32[i], availableFpsRanges.data.i32[i+1]};
+ supportedPreviewFpsRanges.add(fpsRange);
+ }
+ if (supportedPreviewFpsRanges.size() == 0) {
+ ALOGE("Supported preview fps range is empty");
+ return NO_INIT;
+ }
+
int32_t bestStillCaptureFpsRange[2] = {
- availableFpsRanges.data.i32[0], availableFpsRanges.data.i32[1]
+ supportedPreviewFpsRanges[0].low, supportedPreviewFpsRanges[0].high
};
int32_t curRange =
bestStillCaptureFpsRange[1] - bestStillCaptureFpsRange[0];
- for (size_t i = 2; i < availableFpsRanges.count; i += 2) {
+ for (size_t i = 1; i < supportedPreviewFpsRanges.size(); i ++) {
int32_t nextRange =
- availableFpsRanges.data.i32[i + 1] -
- availableFpsRanges.data.i32[i];
+ supportedPreviewFpsRanges[i].high -
+ supportedPreviewFpsRanges[i].low;
if ( (nextRange > curRange) || // Maximize size of FPS range first
(nextRange == curRange && // Then minimize low-end FPS
- bestStillCaptureFpsRange[0] > availableFpsRanges.data.i32[i])) {
+ bestStillCaptureFpsRange[0] > supportedPreviewFpsRanges[i].low)) {
- bestStillCaptureFpsRange[0] = availableFpsRanges.data.i32[i];
- bestStillCaptureFpsRange[1] = availableFpsRanges.data.i32[i + 1];
+ bestStillCaptureFpsRange[0] = supportedPreviewFpsRanges[i].low;
+ bestStillCaptureFpsRange[1] = supportedPreviewFpsRanges[i].high;
curRange = nextRange;
}
}
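The selection rule itself is unchanged by this hunk, only its input list: pick the widest fps range, breaking ties toward the lowest low end. A standalone sketch of that rule over the new FpsRange struct:

// Sketch of the best still-capture fps range selection.
#include <cstdint>
#include <cstdio>
#include <vector>

struct FpsRange { int32_t low; int32_t high; };

FpsRange pickBestStillCaptureRange(const std::vector<FpsRange>& ranges) {
    FpsRange best = ranges[0];
    int32_t bestSpan = best.high - best.low;
    for (size_t i = 1; i < ranges.size(); i++) {
        int32_t span = ranges[i].high - ranges[i].low;
        // Maximize the span first, then minimize the low end on ties.
        if (span > bestSpan || (span == bestSpan && ranges[i].low < best.low)) {
            best = ranges[i];
            bestSpan = span;
        }
    }
    return best;
}

int main() {
    std::vector<FpsRange> ranges = {{15, 30}, {30, 30}, {7, 30}};
    FpsRange best = pickBestStillCaptureRange(ranges);
    std::printf("(%d,%d)\n", best.low, best.high); // prints (7,30): widest span wins
    return 0;
}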
@@ -2836,22 +2862,7 @@
int64_t Parameters::getJpegStreamMinFrameDurationNs(Parameters::Size size) {
if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- const int STREAM_DURATION_SIZE = 4;
- const int STREAM_FORMAT_OFFSET = 0;
- const int STREAM_WIDTH_OFFSET = 1;
- const int STREAM_HEIGHT_OFFSET = 2;
- const int STREAM_DURATION_OFFSET = 3;
- camera_metadata_ro_entry_t availableStreamMinDurations =
- staticInfo(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
- for (size_t i = 0; i < availableStreamMinDurations.count; i+= STREAM_DURATION_SIZE) {
- int64_t format = availableStreamMinDurations.data.i64[i + STREAM_FORMAT_OFFSET];
- int64_t width = availableStreamMinDurations.data.i64[i + STREAM_WIDTH_OFFSET];
- int64_t height = availableStreamMinDurations.data.i64[i + STREAM_HEIGHT_OFFSET];
- int64_t duration = availableStreamMinDurations.data.i64[i + STREAM_DURATION_OFFSET];
- if (format == HAL_PIXEL_FORMAT_BLOB && width == size.width && height == size.height) {
- return duration;
- }
- }
+ return getMinFrameDurationNs(size, HAL_PIXEL_FORMAT_BLOB);
} else {
Vector<Size> availableJpegSizes = getAvailableJpegSizes();
size_t streamIdx = availableJpegSizes.size();
@@ -2875,6 +2886,57 @@
return -1;
}
+int64_t Parameters::getMinFrameDurationNs(Parameters::Size size, int fmt) {
+ if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ ALOGE("Min frame duration for HAL 3.1 or lower is not supported");
+ return -1;
+ }
+
+ const int STREAM_DURATION_SIZE = 4;
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_DURATION_OFFSET = 3;
+ camera_metadata_ro_entry_t availableStreamMinDurations =
+ staticInfo(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+ for (size_t i = 0; i < availableStreamMinDurations.count; i+= STREAM_DURATION_SIZE) {
+ int64_t format = availableStreamMinDurations.data.i64[i + STREAM_FORMAT_OFFSET];
+ int64_t width = availableStreamMinDurations.data.i64[i + STREAM_WIDTH_OFFSET];
+ int64_t height = availableStreamMinDurations.data.i64[i + STREAM_HEIGHT_OFFSET];
+ int64_t duration = availableStreamMinDurations.data.i64[i + STREAM_DURATION_OFFSET];
+ if (format == fmt && width == size.width && height == size.height) {
+ return duration;
+ }
+ }
+
+ return -1;
+}
+
+bool Parameters::isFpsSupported(const Vector<Size> &sizes, int format, int32_t fps) {
+ // Skip the check for older HAL version, as the min duration is not supported.
+ if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ return true;
+ }
+
+ // Get min frame duration for each size and check if the given fps range can be supported.
+ const int32_t FPS_MARGIN = 1;
+ for (size_t i = 0 ; i < sizes.size(); i++) {
+ int64_t minFrameDuration = getMinFrameDurationNs(sizes[i], format);
+ if (minFrameDuration <= 0) {
+ ALOGE("Min frame duration (%" PRId64") for size (%dx%d) and format 0x%x is wrong!",
+ minFrameDuration, sizes[i].width, sizes[i].height, format);
+ return false;
+ }
+ int32_t maxSupportedFps = 1e9 / minFrameDuration;
+ // Add some margin here for the case where the HAL supports 29.xxx fps.
+ maxSupportedFps += FPS_MARGIN;
+ if (fps > maxSupportedFps) {
+ return false;
+ }
+ }
+ return true;
+}
+
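isFpsSupported() derives the highest supportable fps from a stream's minimum frame duration and allows a 1 fps margin. A reduced sketch of that computation for a single duration value, detached from the metadata lookup:

// Sketch of the fps check: 1e9 / minFrameDurationNs is the max fps, plus a
// 1 fps margin for HALs that report e.g. 29.97 fps as a slightly long duration.
#include <cstdint>
#include <cstdio>

bool isFpsSupported(int64_t minFrameDurationNs, int32_t fps) {
    if (minFrameDurationNs <= 0) return false;
    int32_t maxSupportedFps = static_cast<int32_t>(1e9 / minFrameDurationNs) + 1 /* margin */;
    return fps <= maxSupportedFps;
}

int main() {
    // 33,333,333 ns per frame is ~30 fps; 30 passes, 60 does not.
    std::printf("%d %d\n", isFpsSupported(33333333, 30), isFpsSupported(33333333, 60));
    return 0;
}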
SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
SortedVector<int32_t> outputFormats; // Non-duplicated output formats
if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index f4bb34c..687ac3e 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -115,6 +115,11 @@
int32_t height;
};
+ struct FpsRange {
+ int32_t low;
+ int32_t high;
+ };
+
int32_t exposureCompensation;
bool autoExposureLock;
bool autoWhiteBalanceLock;
@@ -390,6 +395,15 @@
// return -1 if input jpeg size cannot be found in supported size list
int64_t getJpegStreamMinFrameDurationNs(Parameters::Size size);
+ // Helper function to get minimum frame duration for a size/format combination
+ // return -1 if input size/format combination cannot be found.
+ int64_t getMinFrameDurationNs(Parameters::Size size, int format);
+
+ // Helper function to check if a given fps is supported by all the sizes with
+ // the same format.
+ // return true if the device doesn't support min frame duration metadata tag.
+ bool isFpsSupported(const Vector<Size> &size, int format, int32_t fps);
+
// Helper function to get non-duplicated available output formats
SortedVector<int32_t> getAvailableOutputFormats();
// Helper function to get available output jpeg sizes
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index aeab451..48a2a99 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -170,6 +170,9 @@
return res;
}
+ /** Register in-flight map to the status tracker */
+ mInFlightStatusId = mStatusTracker->addComponent();
+
/** Create buffer manager */
mBufferManager = new Camera3BufferManager();
@@ -2196,6 +2199,10 @@
aeTriggerCancelOverride));
if (res < 0) return res;
+ if (mInFlightMap.size() == 1) {
+ mStatusTracker->markComponentActive(mInFlightStatusId);
+ }
+
return OK;
}
@@ -2252,6 +2259,11 @@
mInFlightMap.removeItemsAt(idx, 1);
+ // Indicate idle inFlightMap to the status tracker
+ if (mInFlightMap.size() == 0) {
+ mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
+ }
+
ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
}
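The in-flight map is now registered as a status-tracker component: it is marked active on the 0 to 1 transition and idle again when the map drains. A sketch of that bookkeeping with stand-in StatusTracker and map types rather than the real Camera3Device members:

// Sketch of the in-flight activity bookkeeping.
#include <cstdint>
#include <cstdio>
#include <map>

struct StatusTracker {
    void markComponentActive(int id) { std::printf("component %d active\n", id); }
    void markComponentIdle(int id)   { std::printf("component %d idle\n", id); }
};

class Device {
public:
    void registerInFlight(uint32_t frameNumber) {
        mInFlight[frameNumber] = true;
        // Only the first entry flips the component to active.
        if (mInFlight.size() == 1) mTracker.markComponentActive(mInFlightStatusId);
    }
    void removeInFlight(uint32_t frameNumber) {
        mInFlight.erase(frameNumber);
        // Once the map is empty the device can be considered idle again.
        if (mInFlight.empty()) mTracker.markComponentIdle(mInFlightStatusId);
    }
private:
    StatusTracker mTracker;
    int mInFlightStatusId = 0;
    std::map<uint32_t, bool> mInFlight;
};

int main() {
    Device d;
    d.registerInFlight(1); // active
    d.registerInFlight(2);
    d.removeInFlight(1);
    d.removeInFlight(2);   // idle
    return 0;
}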
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 3244258..17893a9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -725,6 +725,7 @@
Mutex mInFlightLock; // Protects mInFlightMap
InFlightMap mInFlightMap;
+ int mInFlightStatusId;
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,