Merge "Back to the old way of making sure that no more buffers are submitted" into klp-dev
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index a8ffd4a..e796ab3 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -264,6 +264,7 @@
status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
status_t setupH263EncoderParameters(const sp<AMessage> &msg);
status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
+ status_t setupVPXEncoderParameters(const sp<AMessage> &msg);
status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 6b7a63c..34213be 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -41,7 +41,8 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs);
+ int64_t timeBetweenTimeLapseFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers = true);
virtual ~CameraSourceTimeLapse();
@@ -116,7 +117,8 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs);
+ int64_t timeBetweenTimeLapseFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers = true);
// Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
// It only handles the case when mLastReadBufferCopy is signalled. Otherwise
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 095d5ca..f9d9020 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -70,8 +70,9 @@
mOutputFd(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false), mSurfaceMediaSource(NULL),
- mCaptureTimeLapse(false) {
+ mCaptureTimeLapse(false),
+ mStarted(false),
+ mSurfaceMediaSource(NULL) {
ALOGV("Constructor");
reset();
@@ -1089,7 +1090,22 @@
}
}
-status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
+status_t StagefrightRecorder::checkVideoEncoderCapabilities(
+ bool *supportsCameraSourceMetaDataMode) {
+ /* hardware codecs must support camera source meta data mode */
+ Vector<CodecCapabilities> codecs;
+ OMXClient client;
+ CHECK_EQ(client.connect(), (status_t)OK);
+ QueryCodecs(
+ client.interface(),
+ (mVideoEncoder == VIDEO_ENCODER_H263 ? MEDIA_MIMETYPE_VIDEO_H263 :
+ mVideoEncoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 :
+ mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
+ false /* decoder */, true /* hwCodec */, &codecs);
+ *supportsCameraSourceMetaDataMode = codecs.size() > 0;
+ ALOGV("encoder %s camera source meta-data mode",
+ *supportsCameraSourceMetaDataMode ? "supports" : "DOES NOT SUPPORT");
+
if (!mCaptureTimeLapse) {
// Don't clip for time lapse capture as encoder will have enough
// time to encode because of slow capture rate of time lapse.
@@ -1307,7 +1323,9 @@
status_t StagefrightRecorder::setupCameraSource(
sp<CameraSource> *cameraSource) {
status_t err = OK;
- if ((err = checkVideoEncoderCapabilities()) != OK) {
+ bool encoderSupportsCameraSourceMetaDataMode;
+ if ((err = checkVideoEncoderCapabilities(
+ &encoderSupportsCameraSourceMetaDataMode)) != OK) {
return err;
}
Size videoSize;
@@ -1323,13 +1341,14 @@
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenTimeLapseFrameCaptureUs);
+ mTimeBetweenTimeLapseFrameCaptureUs,
+ encoderSupportsCameraSourceMetaDataMode);
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate,
- mPreviewSurface, true /*storeMetaDataInVideoBuffers*/);
+ mPreviewSurface, encoderSupportsCameraSourceMetaDataMode);
}
mCamera.clear();
mCameraProxy.clear();
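
The probe added to checkVideoEncoderCapabilities() is what decides whether CameraSource hands the encoder gralloc metadata handles or real YUV copies. A reduced standalone sketch of the same decision, leaning on the includes already present in StagefrightRecorder.cpp (the helper name is illustrative, not part of this patch):

    // Hypothetical helper: map the recorder's encoder enum to a MIME type and ask
    // OMX whether any hardware encoder exists for it. Hardware encoders are
    // expected to accept camera-source metadata buffers; if none is found, the
    // recorder falls back to feeding real YUV frames through CameraSource.
    static bool hwEncoderSupportsMetaDataMode(video_encoder encoder) {
        const char *mime =
            encoder == VIDEO_ENCODER_H263      ? MEDIA_MIMETYPE_VIDEO_H263  :
            encoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 :
            encoder == VIDEO_ENCODER_H264      ? MEDIA_MIMETYPE_VIDEO_AVC   : "";

        OMXClient client;
        if (client.connect() != OK) {
            return false;
        }
        Vector<CodecCapabilities> codecs;
        QueryCodecs(client.interface(), mime,
                false /* queryDecoders */, true /* hwCodecsOnly */, &codecs);
        client.disconnect();
        return !codecs.isEmpty();
    }
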
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index c864207..31f09e0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -139,7 +139,8 @@
status_t startRTPRecording();
status_t startMPEG2TSRecording();
sp<MediaSource> createAudioSource();
- status_t checkVideoEncoderCapabilities();
+ status_t checkVideoEncoderCapabilities(
+ bool *supportsCameraSourceMetaDataMode);
status_t checkAudioEncoderCapabilities();
// Generic MediaSource set-up. Returns the appropriate
// source (CameraSource or SurfaceMediaSource)
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 92a5361..528fdb9 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1909,6 +1909,11 @@
err = setupAVCEncoderParameters(msg);
break;
+ case OMX_VIDEO_CodingVP8:
+ case OMX_VIDEO_CodingVP9:
+ err = setupVPXEncoderParameters(msg);
+ break;
+
default:
break;
}
@@ -2240,6 +2245,17 @@
return configureBitrate(bitrate, bitrateMode);
}
+status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
+ int32_t bitrate;
+ if (!msg->findInt32("bitrate", &bitrate)) {
+ return INVALID_OPERATION;
+ }
+
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
+ return configureBitrate(bitrate, bitrateMode);
+}
+
status_t ACodec::verifySupportForProfileAndLevel(
int32_t profile, int32_t level) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
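
setupVPXEncoderParameters() only pulls "bitrate" out of the configure message (plus the shared bitrate-mode lookup), so a VP8/VP9 encode session fails with INVALID_OPERATION if the caller never set one. A sketch of a configure message that would satisfy it, assuming the usual MediaCodec format keys reach ACodec unchanged (the concrete values are illustrative):

    // Hypothetical configure message for a VP8 encoder.
    sp<AMessage> format = new AMessage;
    format->setString("mime", MEDIA_MIMETYPE_VIDEO_VP8);   // or ..._VP9
    format->setInt32("width", 1280);
    format->setInt32("height", 720);
    format->setInt32("frame-rate", 30);
    format->setInt32("i-frame-interval", 1);
    format->setInt32("bitrate", 2000000);   // required: missing bitrate aborts configure
    // Optional: "bitrate-mode" is consulted by getBitrateMode(); VBR is assumed
    // to be the default when the key is absent.
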
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 20214e8..5772316 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -41,13 +41,15 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs) {
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers) {
CameraSourceTimeLapse *source = new
CameraSourceTimeLapse(camera, proxy, cameraId,
clientName, clientUid,
videoSize, videoFrameRate, surface,
- timeBetweenFrameCaptureUs);
+ timeBetweenFrameCaptureUs,
+ storeMetaDataInVideoBuffers);
if (source != NULL) {
if (source->initCheck() != OK) {
@@ -67,9 +69,11 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs)
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers)
: CameraSource(camera, proxy, cameraId, clientName, clientUid,
- videoSize, videoFrameRate, surface, true),
+ videoSize, videoFrameRate, surface,
+ storeMetaDataInVideoBuffers),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
mSkipCurrentFrame(false) {
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 6a16bb4..1a9a26b 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -38,11 +38,14 @@
namespace android {
+static int64_t kWakelockMinDelay = 100000ll; // 100ms
+
TimedEventQueue::TimedEventQueue()
: mNextEventID(1),
mRunning(false),
mStopped(false),
- mDeathRecipient(new PMDeathRecipient(this)) {
+ mDeathRecipient(new PMDeathRecipient(this)),
+ mWakeLockCount(0) {
}
TimedEventQueue::~TimedEventQueue() {
@@ -87,9 +90,7 @@
// some events may be left in the queue if we did not flush and the wake lock
// must be released.
- if (!mQueue.empty()) {
- releaseWakeLock_l();
- }
+ releaseWakeLock_l(true /*force*/);
mQueue.clear();
mRunning = false;
@@ -126,13 +127,15 @@
QueueItem item;
item.event = event;
item.realtime_us = realtime_us;
+ item.has_wakelock = false;
if (it == mQueue.begin()) {
mQueueHeadChangedCondition.signal();
}
- if (mQueue.empty()) {
+ if (realtime_us > ALooper::GetNowUs() + kWakelockMinDelay) {
acquireWakeLock_l();
+ item.has_wakelock = true;
}
mQueue.insert(it, item);
@@ -188,10 +191,10 @@
ALOGV("cancelling event %d", (*it).event->eventID());
(*it).event->setEventID(0);
- it = mQueue.erase(it);
- if (mQueue.empty()) {
+ if ((*it).has_wakelock) {
releaseWakeLock_l();
}
+ it = mQueue.erase(it);
if (stopAfterFirstMatch) {
return;
}
@@ -297,11 +300,10 @@
if ((*it).event->eventID() == id) {
sp<Event> event = (*it).event;
event->setEventID(0);
-
- mQueue.erase(it);
- if (mQueue.empty()) {
+ if ((*it).has_wakelock) {
releaseWakeLock_l();
}
+ mQueue.erase(it);
return event;
}
}
@@ -313,51 +315,59 @@
void TimedEventQueue::acquireWakeLock_l()
{
- if (mWakeLockToken != 0) {
- return;
- }
- if (mPowerManager == 0) {
- // use checkService() to avoid blocking if power service is not up yet
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("power"));
- if (binder == 0) {
- ALOGW("cannot connect to the power manager service");
- } else {
- mPowerManager = interface_cast<IPowerManager>(binder);
- binder->linkToDeath(mDeathRecipient);
+ if (mWakeLockCount++ == 0) {
+ CHECK(mWakeLockToken == 0);
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("cannot connect to the power manager service");
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
}
- }
- if (mPowerManager != 0) {
- sp<IBinder> binder = new BBinder();
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder,
- String16("TimedEventQueue"),
- String16("media"));
- IPCThreadState::self()->restoreCallingIdentity(token);
- if (status == NO_ERROR) {
- mWakeLockToken = binder;
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = new BBinder();
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16("TimedEventQueue"),
+ String16("media"));
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
}
}
}
-void TimedEventQueue::releaseWakeLock_l()
+void TimedEventQueue::releaseWakeLock_l(bool force)
{
- if (mWakeLockToken == 0) {
- return;
+ if (force) {
+ if (mWakeLockCount == 0) {
+ return;
+ }
+ // Force wakelock release below by setting reference count to 1.
+ mWakeLockCount = 1;
}
- if (mPowerManager != 0) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
- IPCThreadState::self()->restoreCallingIdentity(token);
+ CHECK(mWakeLockCount != 0);
+ if (--mWakeLockCount == 0) {
+ CHECK(mWakeLockToken != 0);
+ if (mPowerManager != 0) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ }
+ mWakeLockToken.clear();
}
- mWakeLockToken.clear();
}
void TimedEventQueue::clearPowerManager()
{
Mutex::Autolock _l(mLock);
- releaseWakeLock_l();
+ releaseWakeLock_l(true /*force*/);
mPowerManager.clear();
}
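
The wake-lock handling above is plain reference counting: each queued event that is more than kWakelockMinDelay in the future takes one reference, and cancellation, dispatch, stop() and power-manager death give it back (the latter two via force). A reduced sketch of the invariant, with illustrative names:

    #include <stdint.h>

    struct RefCountedWakeLock {
        uint32_t count;
        RefCountedWakeLock() : count(0) {}

        void acquire() {
            if (count++ == 0) {
                // 0 -> 1 transition: take the real PowerManager wake lock here.
            }
        }

        void release(bool force = false) {
            if (force) {
                if (count == 0) return;   // nothing held, nothing to release
                count = 1;                // collapse all outstanding references
            }
            if (count == 0) return;       // the real code asserts (CHECK) instead
            if (--count == 0) {
                // 1 -> 0 transition: drop the real PowerManager wake lock here.
            }
        }
    };

The real lock is only touched on the 0 -> 1 and 1 -> 0 transitions, and force-release collapses the count so teardown can never leak a held wake lock.
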
diff --git a/media/libstagefright/include/TimedEventQueue.h b/media/libstagefright/include/TimedEventQueue.h
index 4e49c83..38a08b1 100644
--- a/media/libstagefright/include/TimedEventQueue.h
+++ b/media/libstagefright/include/TimedEventQueue.h
@@ -118,6 +118,7 @@
struct QueueItem {
sp<Event> event;
int64_t realtime_us;
+ bool has_wakelock;
};
struct StopEvent : public TimedEventQueue::Event {
@@ -139,6 +140,7 @@
sp<IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
const sp<PMDeathRecipient> mDeathRecipient;
+ uint32_t mWakeLockCount;
static void *ThreadWrapper(void *me);
void threadEntry();
@@ -146,7 +148,7 @@
sp<Event> removeEventFromQueue_l(event_id id);
void acquireWakeLock_l();
- void releaseWakeLock_l();
+ void releaseWakeLock_l(bool force = false);
TimedEventQueue(const TimedEventQueue &);
TimedEventQueue &operator=(const TimedEventQueue &);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 72126c1..1cdf8dc 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -81,7 +81,8 @@
mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
FRAME_PROCESSOR_LISTENER_MAX_ID,
- /*listener*/this);
+ /*listener*/this,
+ /*quirkSendPartials*/true);
return OK;
}
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index b2c9b33..f2064fb 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -37,11 +37,11 @@
}
status_t FrameProcessorBase::registerListener(int32_t minId,
- int32_t maxId, wp<FilteredListener> listener) {
+ int32_t maxId, wp<FilteredListener> listener, bool quirkSendPartials) {
Mutex::Autolock l(mInputMutex);
ALOGV("%s: Registering listener for frame id range %d - %d",
__FUNCTION__, minId, maxId);
- RangeListener rListener = { minId, maxId, listener };
+ RangeListener rListener = { minId, maxId, listener, quirkSendPartials };
mRangeListeners.push_back(rListener);
return OK;
}
@@ -145,13 +145,14 @@
ATRACE_CALL();
camera_metadata_ro_entry_t entry;
- // Quirks: Don't deliver partial results to listeners
+ // Quirks: Don't deliver partial results to listeners that don't want them
+ bool quirkIsPartial = false;
entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
if (entry.count != 0 &&
entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
__FUNCTION__, device->getId());
- return OK;
+ quirkIsPartial = true;
}
entry = frame.find(ANDROID_REQUEST_ID);
@@ -169,7 +170,8 @@
List<RangeListener>::iterator item = mRangeListeners.begin();
while (item != mRangeListeners.end()) {
if (requestId >= item->minId &&
- requestId < item->maxId) {
+ requestId < item->maxId &&
+ (!quirkIsPartial || item->quirkSendPartials) ) {
sp<FilteredListener> listener = item->listener.promote();
if (listener == 0) {
item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 4d80ebf..89b608a 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -44,9 +44,11 @@
};
// Register a listener for a range of IDs [minId, maxId). Multiple listeners
- // can be listening to the same range
+ // can be listening to the same range.
+ // QUIRK: sendPartials controls whether partial results will be sent.
status_t registerListener(int32_t minId, int32_t maxId,
- wp<FilteredListener> listener);
+ wp<FilteredListener> listener,
+ bool quirkSendPartials = true);
status_t removeListener(int32_t minId, int32_t maxId,
wp<FilteredListener> listener);
@@ -64,6 +66,7 @@
int32_t minId;
int32_t maxId;
wp<FilteredListener> listener;
+ bool quirkSendPartials;
};
List<RangeListener> mRangeListeners;
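
Since quirkSendPartials defaults to true, listeners registered without the new argument now start seeing partial results as well; a listener that only wants fully assembled metadata has to opt out explicitly. Usage sketch (the ID range constants and listener objects are placeholders, not from this patch):

    // Partial-result-aware client, e.g. CameraDeviceClient after this change:
    frameProcessor->registerListener(kMinId, kMaxId, partialAwareListener,
            /*quirkSendPartials*/true);

    // A hypothetical listener that only wants complete results:
    frameProcessor->registerListener(kMinId, kMaxId, finalOnlyListener,
            /*quirkSendPartials*/false);
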
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index cb72e0e..3dbc1b0 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1405,7 +1405,8 @@
* Check if all 3A fields are ready, and send off a partial 3A-only result
* to the output frame queue
*/
-bool Camera3Device::processPartial3AQuirk(int32_t frameNumber,
+bool Camera3Device::processPartial3AQuirk(
+ int32_t frameNumber, int32_t requestId,
const CameraMetadata& partial) {
// Check if all 3A states are present
@@ -1452,10 +1453,10 @@
if (!gotAllStates) return false;
- ALOGVV("%s: Camera %d: Frame %d: AF mode %d, AWB mode %d, "
+ ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
"AF state %d, AE state %d, AWB state %d, "
"AF trigger %d, AE precapture trigger %d",
- __FUNCTION__, mId, frameNumber,
+ __FUNCTION__, mId, frameNumber, requestId,
afMode, awbMode,
afState, aeState, awbState,
afTriggerId, aeTriggerId);
@@ -1463,9 +1464,10 @@
// Got all states, so construct a minimal result to send
// In addition to the above fields, this means adding in
// android.request.frameCount
+ // android.request.requestId
// android.quirks.partialResult
- const size_t kMinimal3AResultEntries = 7;
+ const size_t kMinimal3AResultEntries = 10;
Mutex::Autolock l(mOutputLock);
@@ -1479,6 +1481,11 @@
return false;
}
+ if (!insert3AResult(min3AResult, ANDROID_REQUEST_ID,
+ &requestId, frameNumber)) {
+ return false;
+ }
+
static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT,
&partialResult, frameNumber)) {
@@ -1615,7 +1622,8 @@
if (!request.partialResultQuirk.haveSent3A) {
request.partialResultQuirk.haveSent3A =
processPartial3AQuirk(frameNumber,
- request.partialResultQuirk.collectedResult);
+ request.requestId,
+ request.partialResultQuirk.collectedResult);
}
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 4a24a88..468f641 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -491,7 +491,8 @@
* and if so, queue up 3A-only result to the client. Returns true if 3A
* is sent.
*/
- bool processPartial3AQuirk(int32_t frameNumber, const CameraMetadata& partial);
+ bool processPartial3AQuirk(int32_t frameNumber, int32_t requestId,
+ const CameraMetadata& partial);
// Helpers for reading and writing 3A metadata into to/from partial results
template<typename T>
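
With android.request.id stamped into the minimal 3A result, a partial delivered through the quirk path can be matched back to its originating capture request rather than just its frame number. A consumer-side sketch, using only the CameraMetadata lookups already shown in this patch (the function name is illustrative):

    // Hypothetical consumer-side check: report whether a result is a partial
    // 3A-only quirk result and recover the request id it belongs to.
    static bool parsePartial3A(const CameraMetadata &frame,
            int32_t *requestId, bool *isPartial) {
        camera_metadata_ro_entry_t entry =
                frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
        *isPartial = (entry.count != 0 &&
                entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL);

        entry = frame.find(ANDROID_REQUEST_ID);
        if (entry.count == 0) {
            return false;   // request id should now always be present
        }
        *requestId = entry.data.i32[0];
        return true;
    }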