Merge "Revert "adapt ld.config.txt for vndk apex""
am: 055ded38f4
Change-Id: Ib9974bdc1576d3b51b020fca148a85fc0d6bf353
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index a5937fd..af8ec06 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -37,9 +37,11 @@
namespace.platform.isolated = true
-namespace.platform.search.paths = /system/${LIB}
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.search.paths += /apex/com.android.runtime/${LIB}
namespace.platform.asan.search.paths = /data/asan/system/${LIB}
namespace.platform.asan.search.paths += /system/${LIB}
+namespace.platform.asan.search.paths += /apex/com.android.runtime/${LIB}
# /system/lib/libc.so, etc are symlinks to /apex/com.android.lib/lib/bionic/libc.so, etc.
# Add /apex/... path to the permitted paths because linker uses realpath(3)
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 3328a85..9e034c4 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -138,6 +138,7 @@
private:
friend ACameraCaptureSession;
+ friend ACameraDevice;
camera_status_t checkCameraClosedOrErrorLocked() const;
@@ -387,7 +388,6 @@
mDevice(new android::acam::CameraDevice(id, cb, std::move(chars), this)) {}
~ACameraDevice();
-
/*******************
* NDK public APIs *
*******************/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 37de30a..7ab0124 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -24,6 +24,7 @@
#include <algorithm>
#include <mutex>
#include <string>
+#include <variant>
#include <vector>
#include <stdio.h>
#include <stdio.h>
@@ -49,6 +50,7 @@
static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
+using ConfiguredWindows = std::set<native_handle_t *>;
class CameraHelper {
public:
@@ -60,9 +62,12 @@
const char* physicalCameraId;
native_handle_t* anw;
};
- int initCamera(native_handle_t* imgReaderAnw,
+
+ // Retaining the error code in case the caller needs to analyze it.
+ std::variant<int, ConfiguredWindows> initCamera(native_handle_t* imgReaderAnw,
const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
bool usePhysicalSettings) {
+ ConfiguredWindows configuredWindows;
if (imgReaderAnw == nullptr) {
ALOGE("Cannot initialize camera before image reader get initialized.");
return -1;
@@ -78,7 +83,7 @@
ret = ACameraManager_openCamera(mCameraManager, mCameraId, &mDeviceCb, &mDevice);
if (ret != AMEDIA_OK || mDevice == nullptr) {
ALOGE("Failed to open camera, ret=%d, mDevice=%p.", ret, mDevice);
- return -1;
+ return ret;
}
// Create capture session
@@ -97,8 +102,9 @@
ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
return ret;
}
-
+ configuredWindows.insert(mImgReaderAnw);
std::vector<const char*> idPointerList;
+ std::set<const native_handle_t*> physicalStreamMap;
for (auto& physicalStream : physicalImgReaders) {
ACaptureSessionOutput* sessionOutput = nullptr;
ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@@ -112,21 +118,25 @@
ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
return ret;
}
- mExtraOutputs.push_back(sessionOutput);
+ ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
+ if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
+ ALOGW("ACameraDevice_isSessionConfigurationSupported failed, ret=%d camera id %s",
+ ret, mCameraId);
+ ACaptureSessionOutputContainer_remove(mOutputs, sessionOutput);
+ ACaptureSessionOutput_free(sessionOutput);
+ continue;
+ }
+ configuredWindows.insert(physicalStream.anw);
// Assume there is at most one physical stream per physical camera.
mPhysicalCameraIds.push_back(physicalStream.physicalCameraId);
idPointerList.push_back(physicalStream.physicalCameraId);
+ physicalStreamMap.insert(physicalStream.anw);
+ mSessionPhysicalOutputs.push_back(sessionOutput);
}
ACameraIdList cameraIdList;
cameraIdList.numCameras = idPointerList.size();
cameraIdList.cameraIds = idPointerList.data();
- ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
- if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
- ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
- return ret;
- }
-
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
@@ -157,6 +167,10 @@
}
for (auto& physicalStream : physicalImgReaders) {
+ if (physicalStreamMap.find(physicalStream.anw) == physicalStreamMap.end()) {
+ ALOGI("Skipping physicalStream anw=%p", physicalStream.anw);
+ continue;
+ }
ACameraOutputTarget* outputTarget = nullptr;
ret = ACameraOutputTarget_create(physicalStream.anw, &outputTarget);
if (ret != AMEDIA_OK) {
@@ -168,11 +182,11 @@
ALOGE("ACaptureRequest_addTarget failed, ret=%d", ret);
return ret;
}
- mReqExtraOutputs.push_back(outputTarget);
+ mReqPhysicalOutputs.push_back(outputTarget);
}
mIsCameraReady = true;
- return 0;
+ return configuredWindows;
}
@@ -184,10 +198,10 @@
ACameraOutputTarget_free(mReqImgReaderOutput);
mReqImgReaderOutput = nullptr;
}
- for (auto& outputTarget : mReqExtraOutputs) {
+ for (auto& outputTarget : mReqPhysicalOutputs) {
ACameraOutputTarget_free(outputTarget);
}
- mReqExtraOutputs.clear();
+ mReqPhysicalOutputs.clear();
if (mStillRequest) {
ACaptureRequest_free(mStillRequest);
mStillRequest = nullptr;
@@ -201,10 +215,10 @@
ACaptureSessionOutput_free(mImgReaderOutput);
mImgReaderOutput = nullptr;
}
- for (auto& extraOutput : mExtraOutputs) {
+ for (auto& extraOutput : mSessionPhysicalOutputs) {
ACaptureSessionOutput_free(extraOutput);
}
- mExtraOutputs.clear();
+ mSessionPhysicalOutputs.clear();
if (mOutputs) {
ACaptureSessionOutputContainer_free(mOutputs);
mOutputs = nullptr;
@@ -262,13 +276,13 @@
// Capture session
ACaptureSessionOutputContainer* mOutputs = nullptr;
ACaptureSessionOutput* mImgReaderOutput = nullptr;
- std::vector<ACaptureSessionOutput*> mExtraOutputs;
+ std::vector<ACaptureSessionOutput*> mSessionPhysicalOutputs;
ACameraCaptureSession* mSession = nullptr;
// Capture request
ACaptureRequest* mStillRequest = nullptr;
ACameraOutputTarget* mReqImgReaderOutput = nullptr;
- std::vector<ACameraOutputTarget*> mReqExtraOutputs;
+ std::vector<ACameraOutputTarget*> mReqPhysicalOutputs;
bool mIsCameraReady = false;
const char* mCameraId;
@@ -581,9 +595,11 @@
}
CameraHelper cameraHelper(id, mCameraManager);
- ret = cameraHelper.initCamera(testCase.getNativeWindow(),
- {}/*physicalImageReaders*/, false/*usePhysicalSettings*/);
- if (ret < 0) {
+ std::variant<int, ConfiguredWindows> retInit =
+ cameraHelper.initCamera(testCase.getNativeWindow(), {}/*physicalImageReaders*/,
+ false/*usePhysicalSettings*/);
+ int *retp = std::get_if<int>(&retInit);
+ if (retp) {
ALOGE("Unable to initialize camera helper");
return false;
}
@@ -751,10 +767,15 @@
physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
- int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(),
- physicalImgReaderInfo, usePhysicalSettings);
- ASSERT_EQ(ret, 0);
-
+ std::variant<int, ConfiguredWindows> retInit =
+ cameraHelper.initCamera(testCases[0]->getNativeWindow(), physicalImgReaderInfo,
+ usePhysicalSettings);
+ int *retp = std::get_if<int>(&retInit);
+ ASSERT_EQ(retp, nullptr);
+ ConfiguredWindows *configuredWindowsp = std::get_if<ConfiguredWindows>(&retInit);
+ ASSERT_NE(configuredWindowsp, nullptr);
+ ASSERT_LE(configuredWindowsp->size(), testCases.size());
+ int ret = 0;
if (!cameraHelper.isCameraReady()) {
ALOGW("Camera is not ready after successful initialization. It's either due to camera "
"on board lacks BACKWARDS_COMPATIBLE capability or the device does not have "
@@ -776,9 +797,15 @@
break;
}
}
- ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
- ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
- ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
+ for(auto &testCase : testCases) {
+ auto it = configuredWindowsp->find(testCase->getNativeWindow());
+ if (it == configuredWindowsp->end()) {
+ continue;
+ }
+ ALOGI("Testing window %p", testCase->getNativeWindow());
+ ASSERT_EQ(testCase->getAcquiredImageCount(), pictureCount);
+ }
+
ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
ACameraMetadata_free(staticMetadata);
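
The AImageReaderVendorTest change above makes initCamera() return either an error code or the set of successfully configured windows through std::variant, which callers unpack with std::get_if. A minimal, self-contained sketch of that error-or-result pattern (the names below are illustrative, not taken from the patch):

// error_or_result.cpp -- illustrative only; not part of the patch.
#include <cstdio>
#include <set>
#include <variant>

using ConfiguredWindows = std::set<int>;   // stand-in for std::set<native_handle_t*>

// Returns an error code on failure, or the set of configured windows on success.
std::variant<int, ConfiguredWindows> initCamera(bool fail) {
    if (fail) {
        return -1;                         // keep the original error code for the caller
    }
    ConfiguredWindows windows{1, 2, 3};
    return windows;
}

int main() {
    auto result = initCamera(/*fail=*/false);
    if (const int* err = std::get_if<int>(&result)) {
        std::printf("initCamera failed: %d\n", *err);
        return 1;
    }
    const auto* windows = std::get_if<ConfiguredWindows>(&result);
    std::printf("configured %zu windows\n", windows->size());
    return 0;
}
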
diff --git a/media/bufferpool/1.0/AccessorImpl.cpp b/media/bufferpool/1.0/AccessorImpl.cpp
index fa17f15..a5366f6 100644
--- a/media/bufferpool/1.0/AccessorImpl.cpp
+++ b/media/bufferpool/1.0/AccessorImpl.cpp
@@ -151,6 +151,7 @@
newConnection->initialize(accessor, id);
*connection = newConnection;
*pConnectionId = id;
+ mBufferPool.mConnectionIds.insert(id);
++sSeqId;
}
}
@@ -305,7 +306,12 @@
found->second->mSenderValidated = true;
return true;
}
- // TODO: verify there is target connection Id
+ if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
+ // N.B.: the target id could be spoofed, or the receiving connection may already be closed.
+ ALOGD("bufferpool %p receiver connection %lld is no longer valid",
+ this, (long long)message.targetConnectionId);
+ return false;
+ }
mStats.onBufferSent();
mTransactions.insert(std::make_pair(
message.transactionId,
@@ -450,6 +456,7 @@
}
}
}
+ mConnectionIds.erase(connectionId);
return true;
}
diff --git a/media/bufferpool/1.0/AccessorImpl.h b/media/bufferpool/1.0/AccessorImpl.h
index c04dbf3..84cb685 100644
--- a/media/bufferpool/1.0/AccessorImpl.h
+++ b/media/bufferpool/1.0/AccessorImpl.h
@@ -94,6 +94,7 @@
std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
std::set<BufferId> mFreeBuffers;
+ std::set<ConnectionId> mConnectionIds;
/// Buffer pool statistics which tracks allocation and transfer statistics.
struct Stats {
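
The bufferpool change above keeps a registry of live connection ids (mConnectionIds) so a transfer message naming an unknown target connection can be rejected instead of being recorded. A small sketch of that registry check under assumed, simplified types:

// connection_registry.cpp -- illustrative only; not part of the patch.
#include <cstdint>
#include <cstdio>
#include <set>

using ConnectionId = int64_t;

class ConnectionRegistry {
public:
    void onConnect(ConnectionId id)  { mConnectionIds.insert(id); }
    void onClose(ConnectionId id)    { mConnectionIds.erase(id); }

    // A sender may name a target that was never created or was already closed;
    // reject such messages instead of recording a transaction for them.
    bool isValidTarget(ConnectionId id) const {
        return mConnectionIds.find(id) != mConnectionIds.end();
    }

private:
    std::set<ConnectionId> mConnectionIds;
};

int main() {
    ConnectionRegistry registry;
    registry.onConnect(7);
    std::printf("target 7 valid: %d\n", registry.isValidTarget(7));   // 1
    registry.onClose(7);
    std::printf("target 7 valid: %d\n", registry.isValidTarget(7));   // 0
    return 0;
}
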
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 94cf006..cacd465 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -163,6 +163,7 @@
*connection = newConnection;
*pConnectionId = id;
*pMsgId = mBufferPool.mInvalidation.mInvalidationId;
+ mBufferPool.mConnectionIds.insert(id);
mBufferPool.mInvalidationChannel.getDesc(invDescPtr);
mBufferPool.mInvalidation.onConnect(id, observer);
++sSeqId;
@@ -474,7 +475,12 @@
found->second->mSenderValidated = true;
return true;
}
- // TODO: verify there is target connection Id
+ if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
+ // N.B.: the target id could be spoofed, or the receiving connection may already be closed.
+ ALOGD("bufferpool2 %p receiver connection %lld is no longer valid",
+ this, (long long)message.targetConnectionId);
+ return false;
+ }
mStats.onBufferSent();
mTransactions.insert(std::make_pair(
message.transactionId,
@@ -644,6 +650,7 @@
}
}
}
+ mConnectionIds.erase(connectionId);
return true;
}
@@ -774,11 +781,19 @@
std::mutex &mutex,
std::condition_variable &cv,
bool &ready) {
+ constexpr uint32_t NUM_SPIN_TO_INCREASE_SLEEP = 1024;
+ constexpr uint32_t NUM_SPIN_TO_LOG = 1024*8;
+ constexpr useconds_t MAX_SLEEP_US = 10000;
+ uint32_t numSpin = 0;
+ useconds_t sleepUs = 1;
+
while(true) {
std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> copied;
{
std::unique_lock<std::mutex> lock(mutex);
if (!ready) {
+ numSpin = 0;
+ sleepUs = 1;
cv.wait(lock);
}
copied.insert(accessors.begin(), accessors.end());
@@ -800,9 +815,20 @@
if (accessors.size() == 0) {
ready = false;
} else {
- // prevent draining cpu.
+ // TODO Use an efficient way to wait over FMQ.
+ // N.B. Since there is no efficient way to wait over FMQ,
+ // polling over the FMQ is the current way to prevent draining
+ // CPU.
lock.unlock();
- std::this_thread::yield();
+ ++numSpin;
+ if (numSpin % NUM_SPIN_TO_INCREASE_SLEEP == 0 &&
+ sleepUs < MAX_SLEEP_US) {
+ sleepUs *= 10;
+ }
+ if (numSpin % NUM_SPIN_TO_LOG == 0) {
+ ALOGW("invalidator thread spinning");
+ }
+ ::usleep(sleepUs);
}
}
}
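
The invalidator-thread change above replaces a bare yield() with polling plus an escalating sleep, because there is no efficient way to wait on the FMQ. A standalone sketch of that backoff loop; the constants mirror the patch, while the "did work" predicate is a placeholder:

// backoff_poll.cpp -- illustrative only; not part of the patch.
#include <cstdint>
#include <cstdio>
#include <unistd.h>

int main() {
    constexpr uint32_t kSpinsToIncreaseSleep = 1024;
    constexpr uint32_t kSpinsToLog = 1024 * 8;
    constexpr useconds_t kMaxSleepUs = 10000;

    uint32_t numSpin = 0;
    useconds_t sleepUs = 1;

    for (int iteration = 0; iteration < 4096; ++iteration) {
        bool didWork = (iteration % 1500 == 0);  // placeholder for "handled an FMQ message"
        if (didWork) {
            numSpin = 0;       // reset the backoff as soon as there is work again
            sleepUs = 1;
            continue;
        }
        ++numSpin;
        if (numSpin % kSpinsToIncreaseSleep == 0 && sleepUs < kMaxSleepUs) {
            sleepUs *= 10;     // 1 us -> 10 us -> ... capped at 10 ms
        }
        if (numSpin % kSpinsToLog == 0) {
            std::printf("poller spinning, sleep is now %u us\n", (unsigned)sleepUs);
        }
        ::usleep(sleepUs);
    }
    return 0;
}
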
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index eea72b9..807e0f1 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -111,6 +111,7 @@
std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
std::set<BufferId> mFreeBuffers;
+ std::set<ConnectionId> mConnectionIds;
struct Invalidation {
static std::atomic<std::uint32_t> sInvSeqId;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 8e3852c..1dc676b 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -157,7 +157,7 @@
mSentCodecSpecificData(false),
mInputTimeSet(false),
mInputSize(0),
- mInputTimeUs(0),
+ mNextFrameTimestampUs(0),
mSignalledError(false),
mOutIndex(0u) {
}
@@ -183,7 +183,7 @@
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
- mInputTimeUs = 0;
+ mNextFrameTimestampUs = 0;
mSignalledError = false;
return C2_OK;
}
@@ -201,7 +201,7 @@
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
- mInputTimeUs = 0;
+ mNextFrameTimestampUs = 0;
return C2_OK;
}
@@ -365,17 +365,18 @@
capacity = view.capacity();
}
if (!mInputTimeSet && capacity > 0) {
- mInputTimeUs = work->input.ordinal.timestamp;
+ mNextFrameTimestampUs = work->input.ordinal.timestamp;
mInputTimeSet = true;
}
size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
/ mNumBytesPerInputFrame;
- ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu mNumBytesPerInputFrame = %u",
- capacity, mInputSize, numFrames, mNumBytesPerInputFrame);
+ ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
+ "mNumBytesPerInputFrame = %u inputTS = %lld",
+ capacity, mInputSize, numFrames,
+ mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll());
std::shared_ptr<C2LinearBlock> block;
- std::shared_ptr<C2Buffer> buffer;
std::unique_ptr<C2WriteView> wView;
uint8_t *outPtr = temp;
size_t outAvailable = 0u;
@@ -442,7 +443,11 @@
const std::shared_ptr<C2Buffer> mBuffer;
};
- C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+ struct OutputBuffer {
+ std::shared_ptr<C2Buffer> buffer;
+ c2_cntr64_t timestampUs;
+ };
+ std::list<OutputBuffer> outputBuffers;
while (encoderErr == AACENC_OK && inargs.numInSamples > 0) {
if (numFrames && !block) {
@@ -473,29 +478,22 @@
&outargs);
if (encoderErr == AACENC_OK) {
- if (buffer) {
- outOrdinal.frameIndex = mOutIndex++;
- outOrdinal.timestamp = mInputTimeUs;
- cloneAndSend(
- inputIndex,
- work,
- FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
- buffer.reset();
- }
-
if (outargs.numOutBytes > 0) {
mInputSize = 0;
int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
+ outargs.numInSamples;
- mInputTimeUs = work->input.ordinal.timestamp
+ c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
+ mNextFrameTimestampUs = work->input.ordinal.timestamp
+ (consumed * 1000000ll / channelCount / sampleRate);
- buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
+ std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
#if defined(LOG_NDEBUG) && !LOG_NDEBUG
hexdump(outPtr, std::min(outargs.numOutBytes, 256));
#endif
outPtr = temp;
outAvailable = 0;
block.reset();
+
+ outputBuffers.push_back({buffer, currentFrameTimestampUs});
} else {
mInputSize += outargs.numInSamples * sizeof(int16_t);
}
@@ -506,8 +504,9 @@
inargs.numInSamples -= outargs.numInSamples;
}
}
- ALOGV("encoderErr = %d mInputSize = %zu inargs.numInSamples = %d, mInputTimeUs = %lld",
- encoderErr, mInputSize, inargs.numInSamples, mInputTimeUs.peekll());
+ ALOGV("encoderErr = %d mInputSize = %zu "
+ "inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
+ encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
}
if (eos && inBufferSize[0] > 0) {
@@ -542,10 +541,27 @@
&outargs);
}
- outOrdinal.frameIndex = mOutIndex++;
- outOrdinal.timestamp = mInputTimeUs;
+ while (outputBuffers.size() > 1) {
+ const OutputBuffer& front = outputBuffers.front();
+ C2WorkOrdinalStruct ordinal = work->input.ordinal;
+ ordinal.frameIndex = mOutIndex++;
+ ordinal.timestamp = front.timestampUs;
+ cloneAndSend(
+ inputIndex,
+ work,
+ FillWork(C2FrameData::FLAG_INCOMPLETE, ordinal, front.buffer));
+ outputBuffers.pop_front();
+ }
+ std::shared_ptr<C2Buffer> buffer;
+ C2WorkOrdinalStruct ordinal = work->input.ordinal;
+ ordinal.frameIndex = mOutIndex++;
+ if (!outputBuffers.empty()) {
+ ordinal.timestamp = outputBuffers.front().timestampUs;
+ buffer = outputBuffers.front().buffer;
+ }
+ // Mark the end of frame
FillWork((C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0),
- outOrdinal, buffer)(work);
+ ordinal, buffer)(work);
}
c2_status_t C2SoftAacEnc::drain(
@@ -569,7 +585,7 @@
mSentCodecSpecificData = false;
mInputTimeSet = false;
mInputSize = 0u;
- mInputTimeUs = 0;
+ mNextFrameTimestampUs = 0;
// TODO: we don't have any pending work at this time to drain.
return C2_OK;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index a38be19..2655039 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -56,7 +56,7 @@
bool mSentCodecSpecificData;
bool mInputTimeSet;
size_t mInputSize;
- c2_cntr64_t mInputTimeUs;
+ c2_cntr64_t mNextFrameTimestampUs;
bool mSignalledError;
std::atomic_uint64_t mOutIndex;
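
The AAC encoder change above renames mInputTimeUs to mNextFrameTimestampUs and advances it by the time covered by the samples consumed from the current input. A tiny worked example of that arithmetic with invented numbers:

// next_frame_timestamp.cpp -- illustrative only; not part of the patch.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t inputTimestampUs = 1'000'000;  // timestamp of the current input buffer
    const int channelCount = 2;
    const int sampleRate = 48000;

    // Suppose the encoder consumed 1024 frames' worth of interleaved samples
    // from this input while producing the current output frame.
    const int64_t consumedSamples = 1024 * channelCount;

    // Same formula as the patch: consumed / channels / rate, scaled to microseconds.
    const int64_t nextFrameTimestampUs =
            inputTimestampUs + consumedSamples * 1000000LL / channelCount / sampleRate;

    // 1024 frames at 48 kHz is ~21333 us, so this prints 1021333 us.
    std::printf("next frame timestamp: %lld us\n", (long long)nextFrameTimestampUs);
    return 0;
}
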
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 9d1cc60..8223273 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -375,7 +375,11 @@
// consumer usage is queried earlier.
- ALOGD("ISConfig%s", status.str().c_str());
+ if (status.str().empty()) {
+ ALOGD("ISConfig not changed");
+ } else {
+ ALOGD("ISConfig%s", status.str().c_str());
+ }
return err;
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 8308292..0cbf62b 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -224,7 +224,7 @@
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
mInputMetEos(false) {
- mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
+ mOutputSurface.lock()->maxDequeueBuffers = 2 * kSmoothnessFactor + kRenderingDepth;
{
Mutexed<Input>::Locked input(mInput);
input->buffers.reset(new DummyInputBuffers(""));
@@ -948,7 +948,8 @@
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers = numOutputSlots + reorderDepth.value + kRenderingDepth;
+ output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+ reorderDepth.value + kRenderingDepth;
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
if (outputSurface) {
@@ -1332,9 +1333,10 @@
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
size_t numOutputSlots = mOutput.lock()->numSlots;
+ size_t numInputSlots = mInput.lock()->numSlots;
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers =
- numOutputSlots + reorderDepth.value + kRenderingDepth;
+ output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+ reorderDepth.value + kRenderingDepth;
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}
@@ -1382,6 +1384,7 @@
bool outputBuffersChanged = false;
size_t numOutputSlots = 0;
+ size_t numInputSlots = mInput.lock()->numSlots;
{
Mutexed<Output>::Locked output(mOutput);
output->outputDelay = outputDelay.value;
@@ -1406,7 +1409,8 @@
uint32_t depth = mReorderStash.lock()->depth();
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers = numOutputSlots + depth + kRenderingDepth;
+ output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+ depth + kRenderingDepth;
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}
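
The CCodecBufferChannel change above grows the output surface's maxDequeueBuffers budget to also cover the input slots, so buffers pinned on the input side cannot starve dequeues. A toy calculation of the old and new bounds; all slot counts and depths below are assumed values, not read from the source:

// max_dequeue_budget.cpp -- illustrative only; values are assumptions.
#include <cstdio>

int main() {
    const int numOutputSlots  = 8;   // assumed
    const int numInputSlots   = 4;   // assumed
    const int reorderDepth    = 2;   // assumed
    const int kRenderingDepth = 3;   // assumed, not read from the source

    // Old bound: output slots + reorder depth + rendering depth.
    const int oldMax = numOutputSlots + reorderDepth + kRenderingDepth;
    // New bound additionally reserves room for buffers held on the input side.
    const int newMax = numOutputSlots + numInputSlots + reorderDepth + kRenderingDepth;

    std::printf("maxDequeueBuffers: old=%d new=%d\n", oldMax, newMax);  // old=13 new=17
    return 0;
}
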
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 1cfdc19..5adcd94 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -235,7 +235,10 @@
const std::vector<ConfigMapper> &getConfigMappersForSdkKey(std::string key) const {
auto it = mConfigMappers.find(key);
if (it == mConfigMappers.end()) {
- ALOGD("no c2 equivalents for %s", key.c_str());
+ if (mComplained.count(key) == 0) {
+ ALOGD("no c2 equivalents for %s", key.c_str());
+ mComplained.insert(key);
+ }
return NO_MAPPERS;
}
ALOGV("found %zu eqs for %s", it->second.size(), key.c_str());
@@ -304,6 +307,7 @@
private:
std::map<SdkKey, std::vector<ConfigMapper>> mConfigMappers;
+ mutable std::set<std::string> mComplained;
};
const std::vector<ConfigMapper> StandardParams::NO_MAPPERS;
@@ -508,7 +512,8 @@
.limitTo(D::ENCODER & D::VIDEO));
// convert to timestamp base
add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
- .withMappers([](C2Value v) -> C2Value {
+ .limitTo(D::VIDEO & D::ENCODER & D::CONFIG)
+ .withMapper([](C2Value v) -> C2Value {
// convert from i32 to float
int32_t i32Value;
float fpValue;
@@ -518,12 +523,6 @@
return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
}
return C2Value();
- }, [](C2Value v) -> C2Value {
- int64_t i64;
- if (v.get(&i64)) {
- return float(i64) / 1000000;
- }
- return C2Value();
}));
// remove when codecs switch to proper coding.gop (add support for calculating gop)
deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
@@ -1033,7 +1032,25 @@
}
ReflectedParamUpdater::Dict reflected = mParamUpdater->getParams(paramPointers);
- ALOGD("c2 config is %s", reflected.debugString().c_str());
+ std::string config = reflected.debugString();
+ std::set<std::string> configLines;
+ std::string diff;
+ for (size_t start = 0; start != std::string::npos; ) {
+ size_t end = config.find('\n', start);
+ size_t count = (end == std::string::npos)
+ ? std::string::npos
+ : end - start + 1;
+ std::string line = config.substr(start, count);
+ configLines.insert(line);
+ if (mLastConfig.count(line) == 0) {
+ diff.append(line);
+ }
+ start = (end == std::string::npos) ? std::string::npos : end + 1;
+ }
+ if (!diff.empty()) {
+ ALOGD("c2 config diff is %s", diff.c_str());
+ }
+ mLastConfig.swap(configLines);
bool changed = false;
if (domain & mInputDomain) {
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index 3bafe3f..a61c8b7 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -134,6 +134,8 @@
/// For now support a validation function.
std::map<C2Param::Index, LocalParamValidator> mLocalParams;
+ std::set<std::string> mLastConfig;
+
CCodecConfig();
/// initializes the members required to manage the format: descriptors, reflector,
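
The CCodecConfig change above stops dumping the full c2 config on every query and instead logs only the lines that differ from the previous dump, keeping the prior dump as a set of lines (mLastConfig). A standalone sketch of that line-level diff with illustrative names:

// config_line_diff.cpp -- illustrative only; not part of the patch.
#include <cstdio>
#include <set>
#include <string>

// Returns the lines of `config` that were not present in `lastLines`,
// and replaces `lastLines` with the lines of the new config.
std::string diffAgainstLast(const std::string& config, std::set<std::string>* lastLines) {
    std::set<std::string> currentLines;
    std::string diff;
    for (size_t start = 0; start != std::string::npos; ) {
        size_t end = config.find('\n', start);
        size_t count = (end == std::string::npos) ? std::string::npos : end - start + 1;
        std::string line = config.substr(start, count);
        currentLines.insert(line);
        if (lastLines->count(line) == 0) {
            diff.append(line);
        }
        start = (end == std::string::npos) ? std::string::npos : end + 1;
    }
    lastLines->swap(currentLines);
    return diff;
}

int main() {
    std::set<std::string> last;
    std::printf("first diff:\n%s", diffAgainstLast("bitrate: 1\nwidth: 320\n", &last).c_str());
    std::printf("second diff:\n%s", diffAgainstLast("bitrate: 2\nwidth: 320\n", &last).c_str());
    // The second diff prints only "bitrate: 2" because "width: 320" was already logged.
    return 0;
}
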
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
index 74d14e8..0ee9056 100644
--- a/media/codec2/sfplugin/PipelineWatcher.cpp
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -146,7 +146,7 @@
std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
durations.push_back(elapsed);
}
- std::nth_element(durations.begin(), durations.end(), durations.begin() + n,
+ std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
std::greater<Clock::duration>());
return durations[n];
}
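
The PipelineWatcher fix above corrects the std::nth_element argument order to (first, nth, last). A tiny sketch of the corrected call, selecting the n-th largest duration with std::greater:

// nth_element_usage.cpp -- illustrative only; not part of the patch.
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <functional>
#include <vector>

int main() {
    using Duration = std::chrono::milliseconds;
    std::vector<Duration> durations{
            Duration(5), Duration(42), Duration(17), Duration(8), Duration(29)};
    const size_t n = 1;  // pick the 2nd largest element

    // Correct argument order: (first, nth, last). With std::greater the range is
    // partially sorted in descending order, so durations[n] is the n-th largest.
    std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
                     std::greater<Duration>());

    std::printf("2nd largest duration: %lld ms\n",
                (long long)durations[n].count());  // 29 ms
    return 0;
}
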
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 52eadd4..fb276c2 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -36,6 +36,7 @@
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
+#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index a6cc45b..366cc87 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -89,7 +89,11 @@
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
- int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ // Jitter in the DSP can cause late writes to the FIFO.
+ // This might be caused by resampling.
+ // We want to read the FIFO after the latest possible time
+ // that the DSP could have written the data.
+ int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
// TODO refactor, maybe use setRemoteCounter()
mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
}
@@ -139,7 +143,7 @@
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
- wakeTime = mClockModel.convertPositionToTime(nextPosition);
+ wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
default:
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 747d0e1..9abdf53 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,12 +19,11 @@
#include <log/log.h>
#include <stdint.h>
+#include <algorithm>
#include "utility/AudioClock.h"
#include "IsochronousClockModel.h"
-#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
-
using namespace aaudio;
IsochronousClockModel::IsochronousClockModel()
@@ -32,7 +31,7 @@
, mMarkerNanoTime(0)
, mSampleRate(48000)
, mFramesPerBurst(64)
- , mMaxLatenessInNanos(0)
+ , mMaxMeasuredLatenessNanos(0)
, mState(STATE_STOPPED)
{
}
@@ -41,8 +40,7 @@
}
void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
- ALOGV("setPositionAndTime(%lld, %lld)",
- (long long) framePosition, (long long) nanoTime);
+ ALOGV("setPositionAndTime, %lld, %lld", (long long) framePosition, (long long) nanoTime);
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
}
@@ -54,7 +52,9 @@
}
void IsochronousClockModel::stop(int64_t nanoTime) {
- ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
+ ALOGD("stop(nanos = %lld) max lateness = %d micros\n",
+ (long long) nanoTime,
+ (int) (mMaxMeasuredLatenessNanos / 1000));
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
@@ -69,9 +69,10 @@
}
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
-// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-// (long long)framePosition,
-// (long long)nanoTime);
+ mTimestampCount++;
+// Log position and time in CSV format so we can import it easily into spreadsheets.
+ //ALOGD("%s() CSV, %d, %lld, %lld", __func__,
+ //mTimestampCount, (long long)framePosition, (long long)nanoTime);
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@@ -108,17 +109,56 @@
case STATE_RUNNING:
if (nanosDelta < expectedNanosDelta) {
// Earlier than expected timestamp.
- // This data is probably more accurate so use it.
- // or we may be drifting due to a slow HW clock.
-// ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
-// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+ // This data is probably more accurate, so use it.
+ // Or we may be drifting due to a fast HW clock.
+ //int microsDelta = (int) (nanosDelta / 1000);
+ //int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
+ //ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
+ //__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
+
setPositionAndTime(framePosition, nanoTime);
- } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
- // Later than expected timestamp.
-// ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
-// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
-// (int) (mMaxLatenessInNanos / 1000));
- setPositionAndTime(framePosition - mFramesPerBurst, nanoTime - mMaxLatenessInNanos);
+ } else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
+ // In this case we do not update mMaxMeasuredLatenessNanos because it
+ // would force it too high.
+ // mMaxMeasuredLatenessNanos should stay between 1x and 2x mBurstPeriodNanos.
+ //int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+ //ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
+ //__func__,
+ //mTimestampCount,
+ //measuredLatenessNanos / 1000,
+ //mMaxMeasuredLatenessNanos / 1000,
+ //(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
+ //);
+
+ // This typically happens when we are modelling a service instead of a DSP.
+ setPositionAndTime(framePosition, nanoTime - (2 * mBurstPeriodNanos));
+ } else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
+ //int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
+ mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+
+ //ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
+ //__func__,
+ //mTimestampCount,
+ //mMaxMeasuredLatenessNanos / 1000,
+ //previousLatenessNanos / 1000,
+ //(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
+ //);
+
+ // When we are late, it may be because of preemption in the kernel,
+ // or timing jitter caused by resampling in the DSP,
+ // or we may be drifting due to a slow HW clock.
+ // We add a slight drift value just in case there is actual long-term drift
+ // forward caused by a slower clock.
+ // If the clock is faster, then the model will get pushed earlier
+ // by the code in the preceding branch.
+ // The two opposing forces should allow the model to track the real clock
+ // over a long time.
+ int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
+ setPositionAndTime(framePosition, driftingTime);
+ //ALOGD("%s() - #%d, max lateness = %d micros",
+ //__func__,
+ //mTimestampCount,
+ //(int) (mMaxMeasuredLatenessNanos / 1000));
}
break;
default:
@@ -138,9 +178,12 @@
update();
}
+// Update expected lateness based on sampleRate and framesPerBurst
void IsochronousClockModel::update() {
- int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
- mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+ mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+ // Timestamps may be late by up to a burst because we are randomly sampling the time period
+ // after the DSP position is actually updated.
+ mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
}
int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@@ -183,11 +226,25 @@
return position;
}
+int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
+ // This will never be < 0 because mMaxMeasuredLatenessNanos starts at
+ // mBurstPeriodNanos and only gets bigger.
+ return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
+}
+
+int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
+ return convertPositionToTime(framePosition) + getLateTimeOffsetNanos();
+}
+
+int64_t IsochronousClockModel::convertLatestTimeToPosition(int64_t nanoTime) const {
+ return convertTimeToPosition(nanoTime - getLateTimeOffsetNanos());
+}
+
void IsochronousClockModel::dump() const {
ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
ALOGD("mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mFramesPerBurst = %6d", mFramesPerBurst);
- ALOGD("mMaxLatenessInNanos = %6d", mMaxLatenessInNanos);
+ ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
ALOGD("mState = %6d", mState);
}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 46ca48e..582bf4e 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -18,6 +18,7 @@
#define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
#include <stdint.h>
+#include "utility/AudioClock.h"
namespace aaudio {
@@ -79,6 +80,15 @@
int64_t convertPositionToTime(int64_t framePosition) const;
/**
+ * Calculate the latest estimated time that the stream will be at that position.
+ * The more jittery the clock is, the later this will be.
+ *
+ * @param framePosition
+ * @return time in nanoseconds
+ */
+ int64_t convertPositionToLatestTime(int64_t framePosition) const;
+
+ /**
* Calculate an estimated position where the stream will be at the specified time.
*
* @param nanoTime time of interest
@@ -87,6 +97,18 @@
int64_t convertTimeToPosition(int64_t nanoTime) const;
/**
+ * Calculate the corresponding estimated position based on the specified time being
+ * the latest possible time.
+ *
+ * For the same nanoTime, this may return an earlier position than
+ * convertTimeToPosition().
+ *
+ * @param nanoTime
+ * @return position in frames
+ */
+ int64_t convertLatestTimeToPosition(int64_t nanoTime) const;
+
+ /**
* @param framesDelta difference in frames
* @return duration in nanoseconds
*/
@@ -101,6 +123,9 @@
void dump() const;
private:
+
+ int32_t getLateTimeOffsetNanos() const;
+
enum clock_model_state_t {
STATE_STOPPED,
STATE_STARTING,
@@ -108,13 +133,23 @@
STATE_RUNNING
};
+ // Amount of time to drift forward when we get a late timestamp.
+ // This value was calculated to allow tracking of a clock with 50 ppm error.
+ static constexpr int32_t kDriftNanos = 10 * 1000;
+ // TODO review value of kExtraLatenessNanos
+ static constexpr int32_t kExtraLatenessNanos = 100 * 1000;
+
int64_t mMarkerFramePosition;
int64_t mMarkerNanoTime;
int32_t mSampleRate;
int32_t mFramesPerBurst;
- int32_t mMaxLatenessInNanos;
+ int32_t mBurstPeriodNanos;
+ // Includes mBurstPeriodNanos because we sample randomly over time.
+ int32_t mMaxMeasuredLatenessNanos;
clock_model_state_t mState;
+ int32_t mTimestampCount = 0;
+
void update();
};
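
The clock-model change above derives a "latest possible time" from the measured timestamp lateness, so the capture path only reads the FIFO after the DSP could plausibly have written it. A freestanding sketch of that offset arithmetic, using the same relationships as the patch but with invented numbers:

// latest_time_offset.cpp -- illustrative only; not part of the patch.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t kNanosPerSecond = 1000000000LL;
    const int32_t sampleRate = 48000;
    const int32_t framesPerBurst = 64;

    // One burst period in nanoseconds (the minimum expected lateness).
    const int64_t burstPeriodNanos = framesPerBurst * kNanosPerSecond / sampleRate;

    // Suppose measurements have seen timestamps up to 2 ms late.
    const int32_t maxMeasuredLatenessNanos = 2000000;
    const int32_t kExtraLatenessNanos = 100 * 1000;  // safety margin from the patch

    // Offset between the nominal model time and the latest plausible time.
    const int64_t lateOffsetNanos =
            (maxMeasuredLatenessNanos - burstPeriodNanos) + kExtraLatenessNanos;

    const int64_t nominalTimeNanos = 5'000'000'000LL;  // some position's nominal time
    const int64_t latestTimeNanos  = nominalTimeNanos + lateOffsetNanos;

    std::printf("burst period = %lld ns\n", (long long)burstPeriodNanos);
    std::printf("late offset  = %lld ns\n", (long long)lateOffsetNanos);
    std::printf("latest time  = %lld ns\n", (long long)latestTimeNanos);
    return 0;
}
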
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 3fbbc09..10dda19 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -302,6 +302,8 @@
for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
}
+ pContext->pBundledContext->effectProcessCalled = 0;
+ pContext->pBundledContext->effectInDrain = 0;
ALOGV("\tEffectCreate - Calling LvmBundle_init");
ret = LvmBundle_init(pContext);
@@ -394,6 +396,8 @@
// Clear the instantiated flag for the effect
// protect against the case where an effect is un-instantiated without being disabled
+
+ int &effectInDrain = pContext->pBundledContext->effectInDrain;
if(pContext->EffectType == LVM_BASS_BOOST) {
ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global intstantiated flag");
pSessionContext->bBassInstantiated = LVM_FALSE;
@@ -418,12 +422,16 @@
} else if(pContext->EffectType == LVM_VOLUME) {
ALOGV("\tEffectRelease LVM_VOLUME Clearing global intstantiated flag");
pSessionContext->bVolumeInstantiated = LVM_FALSE;
- if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
+ // There is no samplesToExitCount for volume so we also use the drain flag to check
+ // if we should decrement the effects enabled.
+ if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE
+ || (effectInDrain & 1 << LVM_VOLUME) != 0) {
pContext->pBundledContext->NumberEffectsEnabled--;
}
} else {
ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
}
+ effectInDrain &= ~(1 << pContext->EffectType); // no need to drain if released
// Disable effect, in this case ignore errors (return codes)
// if an effect has already been disabled
@@ -3124,8 +3132,9 @@
int Effect_setEnabled(EffectContext *pContext, bool enabled)
{
- ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
-
+ ALOGV("%s effectType %d, enabled %d, currently enabled %d", __func__,
+ pContext->EffectType, enabled, pContext->pBundledContext->NumberEffectsEnabled);
+ int &effectInDrain = pContext->pBundledContext->effectInDrain;
if (enabled) {
// Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due
// to their nature.
@@ -3139,6 +3148,7 @@
if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
+ effectInDrain &= ~(1 << LVM_BASS_BOOST);
pContext->pBundledContext->SamplesToExitCountBb =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bBassEnabled = LVM_TRUE;
@@ -3152,6 +3162,7 @@
if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
+ effectInDrain &= ~(1 << LVM_EQUALIZER);
pContext->pBundledContext->SamplesToExitCountEq =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
@@ -3164,6 +3175,7 @@
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
+ effectInDrain &= ~(1 << LVM_VIRTUALIZER);
pContext->pBundledContext->SamplesToExitCountVirt =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
@@ -3174,7 +3186,10 @@
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
return -EINVAL;
}
- pContext->pBundledContext->NumberEffectsEnabled++;
+ if ((effectInDrain & 1 << LVM_VOLUME) == 0) {
+ pContext->pBundledContext->NumberEffectsEnabled++;
+ }
+ effectInDrain &= ~(1 << LVM_VOLUME);
pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
break;
default:
@@ -3192,6 +3207,7 @@
return -EINVAL;
}
pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+ effectInDrain |= 1 << LVM_BASS_BOOST;
break;
case LVM_EQUALIZER:
if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
@@ -3199,6 +3215,7 @@
return -EINVAL;
}
pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+ effectInDrain |= 1 << LVM_EQUALIZER;
break;
case LVM_VIRTUALIZER:
if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
@@ -3206,6 +3223,7 @@
return -EINVAL;
}
pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+ effectInDrain |= 1 << LVM_VIRTUALIZER;
break;
case LVM_VOLUME:
if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
@@ -3213,6 +3231,7 @@
return -EINVAL;
}
pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+ effectInDrain |= 1 << LVM_VOLUME;
break;
default:
ALOGV("\tEffect_setEnabled() invalid effect type");
@@ -3283,6 +3302,38 @@
ALOGV("\tLVM_ERROR : Effect_process() ERROR NULL INPUT POINTER OR FRAME COUNT IS WRONG");
return -EINVAL;
}
+
+ int &effectProcessCalled = pContext->pBundledContext->effectProcessCalled;
+ int &effectInDrain = pContext->pBundledContext->effectInDrain;
+ if ((effectProcessCalled & 1 << pContext->EffectType) != 0) {
+ ALOGW("Effect %d already called", pContext->EffectType);
+ const int undrainedEffects = effectInDrain & ~effectProcessCalled;
+ if ((undrainedEffects & 1 << LVM_BASS_BOOST) != 0) {
+ ALOGW("Draining BASS_BOOST");
+ pContext->pBundledContext->SamplesToExitCountBb = 0;
+ --pContext->pBundledContext->NumberEffectsEnabled;
+ effectInDrain &= ~(1 << LVM_BASS_BOOST);
+ }
+ if ((undrainedEffects & 1 << LVM_EQUALIZER) != 0) {
+ ALOGW("Draining EQUALIZER");
+ pContext->pBundledContext->SamplesToExitCountEq = 0;
+ --pContext->pBundledContext->NumberEffectsEnabled;
+ effectInDrain &= ~(1 << LVM_EQUALIZER);
+ }
+ if ((undrainedEffects & 1 << LVM_VIRTUALIZER) != 0) {
+ ALOGW("Draining VIRTUALIZER");
+ pContext->pBundledContext->SamplesToExitCountVirt = 0;
+ --pContext->pBundledContext->NumberEffectsEnabled;
+ effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+ }
+ if ((undrainedEffects & 1 << LVM_VOLUME) != 0) {
+ ALOGW("Draining VOLUME");
+ --pContext->pBundledContext->NumberEffectsEnabled;
+ effectInDrain &= ~(1 << LVM_VOLUME);
+ }
+ }
+ effectProcessCalled |= 1 << pContext->EffectType;
+
if ((pContext->pBundledContext->bBassEnabled == LVM_FALSE)&&
(pContext->EffectType == LVM_BASS_BOOST)){
//ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
@@ -3291,9 +3342,12 @@
//ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
// pContext->pBundledContext->SamplesToExitCountBb);
}
- if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
+ if (pContext->pBundledContext->SamplesToExitCountBb <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ if ((effectInDrain & 1 << LVM_BASS_BOOST) != 0) {
+ pContext->pBundledContext->NumberEffectsEnabled--;
+ effectInDrain &= ~(1 << LVM_BASS_BOOST);
+ }
ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
}
}
@@ -3301,7 +3355,10 @@
(pContext->EffectType == LVM_VOLUME)){
//ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ if ((effectInDrain & 1 << LVM_VOLUME) != 0) {
+ pContext->pBundledContext->NumberEffectsEnabled--;
+ effectInDrain &= ~(1 << LVM_VOLUME);
+ }
}
if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
(pContext->EffectType == LVM_EQUALIZER)){
@@ -3311,9 +3368,12 @@
//ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
// pContext->pBundledContext->SamplesToExitCountEq);
}
- if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
+ if (pContext->pBundledContext->SamplesToExitCountEq <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ if ((effectInDrain & 1 << LVM_EQUALIZER) != 0) {
+ pContext->pBundledContext->NumberEffectsEnabled--;
+ effectInDrain &= ~(1 << LVM_EQUALIZER);
+ }
ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
}
}
@@ -3326,9 +3386,12 @@
//ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
// pContext->pBundledContext->SamplesToExitCountVirt);
}
- if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
+ if (pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
status = -ENODATA;
- pContext->pBundledContext->NumberEffectsEnabled--;
+ if ((effectInDrain & 1 << LVM_VIRTUALIZER) != 0) {
+ pContext->pBundledContext->NumberEffectsEnabled--;
+ effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+ }
ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
}
}
@@ -3337,8 +3400,18 @@
pContext->pBundledContext->NumberEffectsCalled++;
}
- if(pContext->pBundledContext->NumberEffectsCalled ==
- pContext->pBundledContext->NumberEffectsEnabled){
+ if (pContext->pBundledContext->NumberEffectsCalled >=
+ pContext->pBundledContext->NumberEffectsEnabled) {
+
+ // We expect the # effects called to be equal to # effects enabled in sequence (including
+ // draining effects). Warn if this is not the case due to inconsistent calls.
+ ALOGW_IF(pContext->pBundledContext->NumberEffectsCalled >
+ pContext->pBundledContext->NumberEffectsEnabled,
+ "%s Number of effects called %d is greater than number of effects enabled %d",
+ __func__, pContext->pBundledContext->NumberEffectsCalled,
+ pContext->pBundledContext->NumberEffectsEnabled);
+ effectProcessCalled = 0; // reset our consistency check.
+
//ALOGV("\tEffect_process Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 6af4554..e4aacd0 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -110,6 +110,14 @@
#ifdef SUPPORT_MC
LVM_INT32 ChMask;
#endif
+
+ /* Bitmask indicating whether a drain is in progress after an effect was disabled.
+ The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+ int effectInDrain;
+
+ /* Bitmask indicating whether process() was called for a particular effect.
+ The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+ int effectProcessCalled;
};
/* SessionContext : One session */
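
The effect-bundle change above tracks drain and process state per effect in small bitmasks keyed by 1 << effectType. A self-contained sketch of the set/clear/test pattern; the enum values below are placeholders, not the real lvm_effect_en:

// effect_bitmask.cpp -- illustrative only; not part of the patch.
#include <cstdio>

enum EffectType {   // placeholder values, not the real lvm_effect_en
    LVM_BASS_BOOST = 0,
    LVM_VIRTUALIZER = 1,
    LVM_EQUALIZER = 2,
    LVM_VOLUME = 3,
};

int main() {
    int effectInDrain = 0;

    // Disabling an effect marks it as draining.
    effectInDrain |= 1 << LVM_EQUALIZER;
    effectInDrain |= 1 << LVM_VOLUME;

    // Re-enabling clears the drain bit so it is not double-counted later.
    effectInDrain &= ~(1 << LVM_VOLUME);

    // Processing checks whether the effect still needs its drain accounted for.
    if ((effectInDrain & (1 << LVM_EQUALIZER)) != 0) {
        std::printf("equalizer still draining\n");
        effectInDrain &= ~(1 << LVM_EQUALIZER);
    }
    std::printf("drain mask now 0x%x\n", effectInDrain);  // 0x0
    return 0;
}
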
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index cb8d375..2bf0802 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -77,10 +77,13 @@
if (t != 0) {
if (enabled) {
if (t->exitPending()) {
+ mCaptureLock.unlock();
if (t->requestExitAndWait() == WOULD_BLOCK) {
+ mCaptureLock.lock();
ALOGE("Visualizer::enable() called from thread");
return INVALID_OPERATION;
}
+ mCaptureLock.lock();
}
}
t->mLock.lock();
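
The Visualizer change above drops mCaptureLock around requestExitAndWait() so the capture thread being joined can still acquire that lock on its way out, then re-takes it afterwards. A minimal sketch of the unlock-before-join pattern with illustrative names:

// unlock_before_join.cpp -- illustrative only; not part of the patch.
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex gCaptureLock;

void captureThread() {
    // The worker needs the lock before it can finish.
    std::lock_guard<std::mutex> guard(gCaptureLock);
    std::printf("capture thread exiting\n");
}

int main() {
    std::thread worker(captureThread);

    gCaptureLock.lock();
    // Joining while holding a lock the worker still needs would deadlock,
    // so release it around the join and take it back afterwards.
    gCaptureLock.unlock();
    worker.join();
    gCaptureLock.lock();

    std::printf("joined without deadlock\n");
    gCaptureLock.unlock();
    return 0;
}
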
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 2f13dc9..f130c9b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1635,8 +1635,13 @@
return BAD_VALUE;
}
+ // Increase moovExtraSize once only irrespective of how many times
+ // setCaptureRate is called.
+ bool containsCaptureFps = mMetaKeys->contains(kMetaKey_CaptureFps);
mMetaKeys->setFloat(kMetaKey_CaptureFps, captureFps);
- mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
+ if (!containsCaptureFps) {
+ mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
+ }
return OK;
}
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 9ba2add..7ebdb1a 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -96,10 +96,18 @@
sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta);
status_t result = mWriter->addSource(newTrack);
- if (result == OK) {
- return mTrackList.add(newTrack);
+ if (result != OK) {
+ return -1;
}
- return -1;
+ float captureFps = -1.0;
+ if (format->findAsFloat("time-lapse-fps", &captureFps)) {
+ ALOGV("addTrack() time-lapse-fps: %f", captureFps);
+ result = mWriter->setCaptureRate(captureFps);
+ if (result != OK) {
+ ALOGW("addTrack() setCaptureRate failed :%d", result);
+ }
+ }
+ return mTrackList.add(newTrack);
}
status_t MediaMuxer::setOrientationHint(int degrees) {
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 2c12a87..972ae1d 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -35,6 +35,10 @@
virtual status_t start(MetaData *params = NULL) = 0;
virtual status_t stop() = 0;
virtual status_t pause() = 0;
+ virtual status_t setCaptureRate(float /* captureFps */) {
+ ALOGW("setCaptureRate unsupported");
+ return ERROR_UNSUPPORTED;
+ }
virtual void setMaxFileSize(int64_t bytes) { mMaxFileSizeLimitBytes = bytes; }
virtual void setMaxFileDuration(int64_t durationUs) { mMaxFileDurationLimitUs = durationUs; }
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 1883f63..1145b7b 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -35,6 +35,7 @@
int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+ LOG_FATAL_IF(reader == nullptr, "AImageReader shouldn't be null while creating AImage");
}
AImage::~AImage() {
@@ -57,14 +58,9 @@
if (mIsClosed) {
return;
}
- sp<AImageReader> reader = mReader.promote();
- if (reader != nullptr) {
- reader->releaseImageLocked(this, releaseFenceFd);
- } else if (mBuffer != nullptr) {
- LOG_ALWAYS_FATAL("%s: parent AImageReader closed without releasing image %p",
- __FUNCTION__, this);
+ if (!mReader->mIsClosed) {
+ mReader->releaseImageLocked(this, releaseFenceFd);
}
-
// Should have been set to nullptr in releaseImageLocked
// Set to nullptr here for extra safety only
mBuffer = nullptr;
@@ -83,22 +79,12 @@
void
AImage::lockReader() const {
- sp<AImageReader> reader = mReader.promote();
- if (reader == nullptr) {
- // Reader has been closed
- return;
- }
- reader->mLock.lock();
+ mReader->mLock.lock();
}
void
AImage::unlockReader() const {
- sp<AImageReader> reader = mReader.promote();
- if (reader == nullptr) {
- // Reader has been closed
- return;
- }
- reader->mLock.unlock();
+ mReader->mLock.unlock();
}
media_status_t
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index e0f16da..0e8cbcb 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -72,7 +72,7 @@
uint32_t getJpegSize() const;
// When the reader is closed, AImage will only accept the close API call
- wp<AImageReader> mReader;
+ const sp<AImageReader> mReader;
const int32_t mFormat;
const uint64_t mUsage; // AHARDWAREBUFFER_USAGE_* flags.
BufferItem* mBuffer;
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index baa4fc7..c0ceb3d 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -113,12 +113,12 @@
void
AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
- Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
+ Mutex::Autolock _l(mLock);
if (mListener.onImageAvailable == nullptr) {
return; // No callback registered
}
@@ -143,12 +143,12 @@
void
AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
- Mutex::Autolock _l(mLock);
sp<AImageReader> reader = mReader.promote();
if (reader == nullptr) {
ALOGW("A frame is available after AImageReader closed!");
return; // reader has been closed
}
+ Mutex::Autolock _l(mLock);
if (mListener.onBufferRemoved == nullptr) {
return; // No callback registered
}
@@ -272,6 +272,11 @@
mFrameListener(new FrameListener(this)),
mBufferRemovedListener(new BufferRemovedListener(this)) {}
+AImageReader::~AImageReader() {
+ Mutex::Autolock _l(mLock);
+ LOG_FATAL_IF(!mIsClosed, "AImageReader not closed before destruction");
+}
+
media_status_t
AImageReader::init() {
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
@@ -347,8 +352,12 @@
return AMEDIA_OK;
}
-AImageReader::~AImageReader() {
+void AImageReader::close() {
Mutex::Autolock _l(mLock);
+ if (mIsClosed) {
+ return;
+ }
+ mIsClosed = true;
AImageReader_ImageListener nullListener = {nullptr, nullptr};
setImageListenerLocked(&nullListener);
@@ -741,6 +750,7 @@
void AImageReader_delete(AImageReader* reader) {
ALOGV("%s", __FUNCTION__);
if (reader != nullptr) {
+ reader->close();
reader->decStrong((void*) AImageReader_delete);
}
return;
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index e328cb1..0779a71 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -76,6 +76,7 @@
int32_t getHeight() const { return mHeight; };
int32_t getFormat() const { return mFormat; };
int32_t getMaxImages() const { return mMaxImages; };
+ void close();
private:
@@ -134,7 +135,7 @@
private:
AImageReader_ImageListener mListener = {nullptr, nullptr};
- wp<AImageReader> mReader;
+ const wp<AImageReader> mReader;
Mutex mLock;
};
sp<FrameListener> mFrameListener;
@@ -149,7 +150,7 @@
private:
AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
- wp<AImageReader> mReader;
+ const wp<AImageReader> mReader;
Mutex mLock;
};
sp<BufferRemovedListener> mBufferRemovedListener;
@@ -165,6 +166,7 @@
native_handle_t* mWindowHandle = nullptr;
List<AImage*> mAcquiredImages;
+ bool mIsClosed = false;
Mutex mLock;
};
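
The NdkImageReader change above introduces an explicit, idempotent close() that AImageReader_delete calls before dropping the last reference, leaving the destructor to merely assert that close() already ran. A small sketch of that close-then-destroy pattern with illustrative names:

// explicit_close.cpp -- illustrative only; not part of the patch.
#include <cassert>
#include <cstdio>

class Reader {
public:
    ~Reader() {
        // By the time the destructor runs, close() must already have been called.
        assert(mIsClosed && "Reader not closed before destruction");
    }

    void close() {
        if (mIsClosed) {
            return;          // idempotent: safe to call more than once
        }
        mIsClosed = true;
        std::printf("releasing buffers and listeners\n");
    }

private:
    bool mIsClosed = false;
};

int main() {
    Reader reader;
    reader.close();          // the delete entry point closes before destruction
    reader.close();          // extra calls are harmless
    return 0;
}
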
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 3c4fbba..13152d0 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,6 +24,7 @@
#include "Configuration.h"
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_dynamicsprocessing.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_visualizer.h>
#include <audio_utils/channels.h>
@@ -2569,7 +2570,8 @@
if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
(((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
(memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
- (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
+ (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0) ||
+ (memcmp(&desc.type, SL_IID_DYNAMICSPROCESSING, sizeof(effect_uuid_t)) == 0))) {
return false;
}
return true;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a021866..73292d3 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3958,6 +3958,32 @@
return INVALID_OPERATION;
}
+// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
+// still applied by the mixer.
+// All tracks attached to a mixer with the VOIP_RX flag are tied to the same
+// stream type, STREAM_VOICE_CALL, so this will only change the HAL volume once even
+// if more than one track is active.
+status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
+{
+ status_t result = NO_ERROR;
+ if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
+ if (*volume != mLeftVolFloat) {
+ result = mOutput->stream->setVolume(*volume, *volume);
+ ALOGE_IF(result != OK,
+ "Error when setting output stream volume: %d", result);
+ if (result == NO_ERROR) {
+ mLeftVolFloat = *volume;
+ }
+ }
+ // if the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume here and we
+ // remove the stream volume contribution from the software volume.
+ if (mLeftVolFloat == *volume) {
+ *volume = 1.0f;
+ }
+ }
+ return result;
+}
+
status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
@@ -4760,22 +4786,25 @@
// no acknowledgement required for newly active tracks
}
sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
+ float volume;
+ if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
+ volume = 0.f;
+ } else {
+ volume = masterVolume * mStreamTypes[track->streamType()].volume;
+ }
+
+ handleVoipVolume_l(&volume);
+
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
- proxy->framesReleased()).first;
- float volume;
- if (track->isPlaybackRestricted()) {
- volume = 0.f;
- } else {
- volume = masterVolume
- * mStreamTypes[track->streamType()].volume
- * vh;
- }
+ proxy->framesReleased()).first;
+ volume *= vh;
track->mCachedVolume = volume;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+
track->setFinalVolume((vlf + vrf) / 2.f);
++fastTracks;
} else {
@@ -4918,20 +4947,22 @@
uint32_t vl, vr; // in U8.24 integer format
float vlf, vrf, vaf; // in [0.0, 1.0] float format
// read original volumes with volume control
- float typeVolume = mStreamTypes[track->streamType()].volume;
- float v = masterVolume * typeVolume;
+ float v = masterVolume * mStreamTypes[track->streamType()].volume;
// Always fetch volumeshaper volume to ensure state is updated.
const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
const float vh = track->getVolumeHandler()->getVolume(
track->mAudioTrackServerProxy->framesReleased()).first;
- if (track->isPausing() || mStreamTypes[track->streamType()].mute
- || track->isPlaybackRestricted()) {
+ if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
+ v = 0;
+ }
+
+ handleVoipVolume_l(&v);
+
+ if (track->isPausing()) {
vl = vr = 0;
vlf = vrf = vaf = 0.;
- if (track->isPausing()) {
- track->setPaused();
- }
+ track->setPaused();
} else {
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
@@ -4983,25 +5014,6 @@
track->mHasVolumeController = false;
}
- // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
- // still applied by the mixer.
- if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
- v = mStreamTypes[track->streamType()].mute ? 0.0f : v;
- if (v != mLeftVolFloat) {
- status_t result = mOutput->stream->setVolume(v, v);
- ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
- if (result == OK) {
- mLeftVolFloat = v;
- }
- }
- // if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
- // remove stream volume contribution from software volume.
- if (v != 0.0f && mLeftVolFloat == v) {
- vlf = min(1.0f, vlf / v);
- vrf = min(1.0f, vrf / v);
- vaf = min(1.0f, vaf / v);
- }
- }
// XXX: these things DON'T need to be done each time
mAudioMixer->setBufferProvider(trackId, track);
mAudioMixer->enable(trackId);
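Taken together, the Threads.cpp hunks above move the dedicated VoIP stream-volume handling out of the per-track loop and into handleVoipVolume_l(). A condensed, illustrative sketch of the resulting per-track flow (it paraphrases the hunks rather than adding behaviour; names follow the patch):

    // Inside prepareTracks_l(), per track:
    float v = masterVolume * mStreamTypes[track->streamType()].volume;
    if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
        v = 0.f;                     // mute/restriction always wins
    }
    handleVoipVolume_l(&v);          // on VOIP_RX outputs: push v to the HAL, then v becomes 1.0f
    v *= track->getVolumeHandler()->getVolume(proxy->framesReleased()).first;  // VolumeShaper gain
    // ... v then feeds the per-track mixer gains (vlf/vrf/vaf) exactly as before.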
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 31e10a3..acb1370 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -747,6 +747,7 @@
// is safe to do so. That will drop the final ref count and destroy the tracks.
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
void removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+ status_t handleVoipVolume_l(float *volume);
// StreamOutHalInterfaceCallback implementation
virtual void onWriteReady();
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index e663485..a4868bf 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1149,6 +1149,8 @@
clientPid,
states[states.size() - 1]);
+ resource_policy::ClientPriority clientPriority = clientDescriptor->getPriority();
+
// Find clients that would be evicted
auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
@@ -1166,8 +1168,7 @@
String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
"(PID %d, score %d state %d) due to eviction policy", curTime.string(),
cameraId.string(), packageName.string(), clientPid,
- priorityScores[priorityScores.size() - 1],
- states[states.size() - 1]);
+ clientPriority.getScore(), clientPriority.getState());
for (auto& i : incompatibleClients) {
msg.appendFormat("\n - Blocked by existing device %s client for package %s"
@@ -1212,9 +1213,8 @@
i->getKey().string(), String8{clientSp->getPackageName()}.string(),
i->getOwnerId(), i->getPriority().getScore(),
i->getPriority().getState(), cameraId.string(),
- packageName.string(), clientPid,
- priorityScores[priorityScores.size() - 1],
- states[states.size() - 1]));
+ packageName.string(), clientPid, clientPriority.getScore(),
+ clientPriority.getState()));
// Notify the client of disconnection
clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@@ -1348,14 +1348,19 @@
Status ret = Status::ok();
String8 id = String8(cameraId);
sp<CameraDeviceClient> client = nullptr;
-
+ String16 clientPackageNameAdj = clientPackageName;
+ if (hardware::IPCThreadState::self()->isServingCall()) {
+ std::string vendorClient =
+ StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
+ clientPackageNameAdj = String16(vendorClient.c_str());
+ }
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
/*api1CameraId*/-1,
- CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
+ CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageNameAdj,
clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
- logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageName),
+ logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageNameAdj),
ret.toString8());
return ret;
}
@@ -2368,11 +2373,7 @@
}
mClientPackageName = packages[0];
}
- if (hardware::IPCThreadState::self()->isServingCall()) {
- std::string vendorClient =
- StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
- mClientPackageName = String16(vendorClient.c_str());
- } else {
+ if (!hardware::IPCThreadState::self()->isServingCall()) {
mAppOpsManager = std::make_unique<AppOpsManager>();
}
}
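With the synthesis of the vendor client name moved into connectDevice(), a camera client arriving over the vendor (hwbinder) interface is tracked under a name of the form vendor.client.pid<1234> (for a caller with PID 1234) from the start of the connection attempt, so logRejected() reports the same synthesized name; the AppOpsManager is still only created for non-vendor callers.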
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 09638d0..98f9328 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -2058,6 +2058,13 @@
return OK;
}
bool CameraProviderManager::ProviderInfo::DeviceInfo3::isAPI1Compatible() const {
+ // Do not advertise NIR cameras to API1 camera apps.
+ camera_metadata_ro_entry cfa = mCameraCharacteristics.find(
+ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT);
+ if (cfa.count == 1 && cfa.data.u8[0] == ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR) {
+ return false;
+ }
+
bool isBackwardCompatible = false;
camera_metadata_ro_entry_t caps = mCameraCharacteristics.find(
ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
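For example, a dedicated near-infrared sensor whose static metadata reports ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR is now excluded from the legacy API1 enumeration even if it also advertises the backward-compatible capability, because the new check returns before the capability scan below it runs.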
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index 3c90de0..94541d8 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -419,7 +419,7 @@
std::vector<std::unique_ptr<Item>> items;
std::vector<std::unique_ptr<Camera>> cameraList;
- auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+ auto image = Image::FromDataForPrimaryImage("image/jpeg", &items);
std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
if (cameraParams == nullptr) {
ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index a8e80fa..4227a3b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -29,6 +29,9 @@
#define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
##__VA_ARGS__)
+#define CLOGW(fmt, ...) ALOGW("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
+ ##__VA_ARGS__)
+
// Convenience macros for transitioning to the error state
#define SET_ERR(fmt, ...) setErrorState( \
"%s: " fmt, __FUNCTION__, \
@@ -3267,14 +3270,19 @@
ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
}
- // Sanity check - if we have too many in-flight frames, something has
- // likely gone wrong
- if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
- CLOGE("In-flight list too large: %zu", mInFlightMap.size());
- } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
- kInFlightWarnLimitHighSpeed) {
- CLOGE("In-flight list too large for high speed configuration: %zu",
- mInFlightMap.size());
+ // Sanity check - if we have too many in-flight frames with a long total in-flight duration,
+ // something has likely gone wrong. This might still be legitimate only if the application
+ // sends in a long burst of long-exposure requests.
+ if (mExpectedInflightDuration > kMinWarnInflightDuration) {
+ if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
+ CLOGW("In-flight list too large: %zu, total inflight duration %" PRIu64,
+ mInFlightMap.size(), mExpectedInflightDuration);
+ } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
+ kInFlightWarnLimitHighSpeed) {
+ CLOGW("In-flight list too large for high speed configuration: %zu, "
+ "total inflight duration %" PRIu64,
+ mInFlightMap.size(), mExpectedInflightDuration);
+ }
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 6e8ac84..cae34ce 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -227,6 +227,7 @@
static const size_t kDumpLockAttempts = 10;
static const size_t kDumpSleepDuration = 100000; // 0.10 sec
static const nsecs_t kActiveTimeout = 500000000; // 500 ms
+ static const nsecs_t kMinWarnInflightDuration = 5000000000; // 5 s
static const size_t kInFlightWarnLimit = 30;
static const size_t kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
static const nsecs_t kDefaultExpectedDuration = 100000000; // 100 ms
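To put the new gate in numbers (assuming mExpectedInflightDuration accumulates the expected duration of each outstanding request, as kDefaultExpectedDuration suggests): 31 in-flight normal-mode requests each expected to take 200 ms give an expected in-flight duration of about 6.2 s, which exceeds kMinWarnInflightDuration (5 s) while the count exceeds kInFlightWarnLimit (30), so the CLOGW above fires; the same 31 requests at the 100 ms default total only 3.1 s and are no longer logged.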
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index f668c33..6a82b1b 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -21,6 +21,7 @@
#include "minijail.h"
#include <binder/ProcessState.h>
+#include <cutils/properties.h>
#include <hidl/HidlTransportSupport.h>
#include <media/stagefright/omx/1.0/Omx.h>
#include <media/stagefright/omx/1.0/OmxStore.h>
@@ -57,7 +58,8 @@
} else {
LOG(INFO) << "IOmx HAL service created.";
}
- sp<IOmxStore> omxStore = new implementation::OmxStore(omx);
+ sp<IOmxStore> omxStore = new implementation::OmxStore(
+ property_get_int64("vendor.media.omx", 1) ? omx : nullptr);
if (omxStore == nullptr) {
LOG(ERROR) << "Cannot create IOmxStore HAL service.";
} else if (omxStore->registerAsService() != OK) {
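The main_codecservice change makes attaching the freshly created IOmx to the OmxStore conditional on the vendor.media.omx system property, which defaults to 1. A minimal sketch of the same gate with the flag pulled out for readability (behaviourally identical to the one-liner in the hunk; property_get_int64 is the libcutils helper included above):

    #include <cutils/properties.h>

    // vendor.media.omx defaults to 1; setting it to 0 registers an OmxStore that is
    // not backed by the IOmx instance created earlier in main().
    const bool plugOmx = property_get_int64("vendor.media.omx", 1) != 0;
    sp<IOmxStore> omxStore = new implementation::OmxStore(plugOmx ? omx : nullptr);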