camera2: Add support for secondary surfaces for a stream
- Enhance OutputConfiguration to contain multiple surfaces for one
underlying stream.
- Create Camera3SharedOutputStream to handle streams with multiple
surfaces.
- Create Camera3StreamSplitter to handle buffer flows between camera and
multiple consumers.
Test: cts, and manually test camera preview/snapshot/recording
Bug: 33777818
Change-Id: Ia010c3cc9d9b4bd5b9ea03cc42fe4e0a0d8033f1
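
Note on the request plumbing below: per-request surface routing is keyed by a SurfaceMap carried alongside each CameraMetadata (see convertMetadataListToRequestListLocked and convertToRequestList). The SurfaceMap typedef itself is not part of this diff; as a rough, hedged sketch, it behaves like a map from stream id to the indices of the surfaces requested on that stream:

    // Illustration only: the std::map stand-in, the function name, and the ids
    // are hypothetical; the real SurfaceMap type is defined outside this patch.
    #include <cstdint>
    #include <map>
    #include <vector>

    std::map<int32_t, std::vector<size_t>> buildExampleSurfaceMap() {
        std::map<int32_t, std::vector<size_t>> surfaceMap;
        surfaceMap[3] = {0, 1};  // stream 3: route output to surface indices 0 and 1
        surfaceMap[5] = {0};     // stream 5: route output to surface index 0 only
        return surfaceMap;
    }

With no explicit surface list, convertToRequestList falls back to surface index 0 for every requested stream, preserving the existing 1-to-1 stream-to-surface behavior.
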
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f64dc3..ae62e74 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -53,6 +53,7 @@
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
#include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -792,7 +793,9 @@
}
status_t Camera3Device::convertMetadataListToRequestListLocked(
- const List<const CameraMetadata> &metadataList, bool repeating,
+ const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
RequestList *requestList) {
if (requestList == NULL) {
CLOGE("requestList cannot be NULL.");
@@ -800,9 +803,11 @@
}
int32_t burstId = 0;
- for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
- it != metadataList.end(); ++it) {
- sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+ List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+ std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+ for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+ ++metadataIt, ++surfaceMapIt) {
+ sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
if (newRequest == 0) {
CLOGE("Can't create capture request");
return BAD_VALUE;
@@ -812,12 +817,12 @@
// Setup burst Id and request Id
newRequest->mResultExtras.burstId = burstId++;
- if (it->exists(ANDROID_REQUEST_ID)) {
- if (it->find(ANDROID_REQUEST_ID).count == 0) {
+ if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+ if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
CLOGE("RequestID entry exists; but must not be empty in metadata");
return BAD_VALUE;
}
- newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+ newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
} else {
CLOGE("RequestID does not exist in metadata");
return BAD_VALUE;
@@ -827,6 +832,10 @@
ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
}
+ if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+ ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+ return BAD_VALUE;
+ }
// Setup batch size if this is a high speed video recording request.
if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -846,12 +855,31 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request) {
requests.push_back(request);
- return captureList(requests, /*lastFrameNumber*/NULL);
+
+ SurfaceMap surfaceMap;
+ camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+ // With no surface list passed in, streams and surfaces have a 1-to-1
+ // mapping, so the surface index is 0 for each stream in the surfaceMap.
+ for (size_t i = 0; i < streams.count; i++) {
+ surfaceMap[streams.data.i32[i]].push_back(0);
+ }
+ surfaceMaps.push_back(surfaceMap);
}
status_t Camera3Device::submitRequestsHelper(
- const List<const CameraMetadata> &requests, bool repeating,
+ const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
/*out*/
int64_t *lastFrameNumber) {
ATRACE_CALL();
@@ -866,8 +894,8 @@
RequestList requestList;
- res = convertMetadataListToRequestListLocked(requests, repeating,
- /*out*/&requestList);
+ res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+ repeating, /*out*/&requestList);
if (res != OK) {
// error logged by previous call
return res;
@@ -1035,10 +1063,11 @@
}
status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
}
status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -1046,19 +1075,23 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
- requests.push_back(request);
- return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+ /*lastFrameNumber*/NULL);
}
status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
}
sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
status_t res;
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
@@ -1074,7 +1107,7 @@
}
}
- sp<CaptureRequest> newRequest = createCaptureRequest(request);
+ sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
return newRequest;
}
@@ -1258,8 +1291,27 @@
}
status_t Camera3Device::createStream(sp<Surface> consumer,
- uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
- camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+ uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, uint32_t consumerUsage) {
+ ATRACE_CALL();
+
+ if (consumer == nullptr) {
+ ALOGE("%s: consumer must not be null", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::vector<sp<Surface>> consumers;
+ consumers.push_back(consumer);
+
+ return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+ format, dataSpace, rotation, id, streamSetId, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, uint32_t consumerUsage) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1303,18 +1355,24 @@
streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
}
+ if (consumers.size() == 0 && !hasDeferredConsumer) {
+ ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+ return BAD_VALUE;
+ }
// HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
// which requires a consumer surface to be available.
- if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ if (hasDeferredConsumer && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
return BAD_VALUE;
}
- if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
return BAD_VALUE;
}
+ bool streamSharing = consumers.size() > 1 || (consumers.size() > 0 && hasDeferredConsumer);
+
// Use legacy dataspace values for older HALs
if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
dataSpace = mapToLegacyDataspace(dataSpace);
@@ -1334,7 +1392,7 @@
return BAD_VALUE;
}
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1343,15 +1401,19 @@
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
- } else if (consumer == nullptr) {
+ } else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
mTimestampOffset, streamSetId);
+ } else if (streamSharing) {
+ newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+ hasDeferredConsumer, width, height, format, consumerUsage,
+ dataSpace, rotation, mTimestampOffset, streamSetId);
} else {
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
}
@@ -2029,16 +2091,18 @@
return res;
}
- if (!stream->isConfiguring()) {
- CLOGE("Stream %d was already fully configured.", streamId);
- return INVALID_OPERATION;
- }
+ if (stream->isConsumerConfigurationDeferred()) {
+ if (!stream->isConfiguring()) {
+ CLOGE("Stream %d was already fully configured.", streamId);
+ return INVALID_OPERATION;
+ }
- res = stream->finishConfiguration();
- if (res != OK) {
- SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
- stream->getId(), strerror(-res), res);
- return res;
+ res = stream->finishConfiguration();
+ if (res != OK) {
+ SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+ stream->getId(), strerror(-res), res);
+ return res;
+ }
}
return OK;
@@ -2049,7 +2113,7 @@
*/
sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
ATRACE_CALL();
status_t res;
@@ -2104,10 +2168,17 @@
mOutputStreams.editValueAt(idx);
// It is illegal to include a deferred consumer output stream into a request
- if (stream->isConsumerConfigurationDeferred()) {
- CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
- stream->getId());
- return NULL;
+ auto iter = surfaceMap.find(streams.data.i32[i]);
+ if (iter != surfaceMap.end()) {
+ const std::vector<size_t>& surfaces = iter->second;
+ for (const auto& surface : surfaces) {
+ if (stream->isConsumerConfigurationDeferred(surface)) {
+ CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+ "due to deferred consumer", stream->getId(), surface);
+ return NULL;
+ }
+ }
+ newRequest->mOutputSurfaces[i] = surfaces;
}
// Lazy completion of stream configuration (allocation/registration)
@@ -3927,6 +3998,14 @@
return TIMED_OUT;
}
halRequest->num_output_buffers++;
+
+ res = outputStream->notifyRequestedSurfaces(halRequest->frame_number,
+ captureRequest->mOutputSurfaces[i]);
+ if (res != OK) {
+ ALOGE("RequestThread: Cannot register output surfaces: %s (%d)",
+ strerror(-res), res);
+ return INVALID_OPERATION;
+ }
}
totalNumBuffers += halRequest->num_output_buffers;
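
For context, a hedged sketch of how a caller might hit the new stream-sharing path in createStream() above: passing more than one consumer surface (the surfaces, sizes, and device pointer here are hypothetical, and error handling is omitted) makes streamSharing true and produces a Camera3SharedOutputStream.

    std::vector<sp<Surface>> consumers = { previewSurface, recordingSurface };
    int streamId = -1;
    status_t res = device->createStream(consumers, /*hasDeferredConsumer*/ false,
            /*width*/ 1920, /*height*/ 1080, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &streamId);

With a single surface and no deferred consumer, the existing Camera3OutputStream path is used unchanged.
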
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 217c8b7..fe4508d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -95,10 +95,12 @@
// idle state
status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
status_t captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t setStreamingRequest(const CameraMetadata &request,
int64_t *lastFrameNumber = NULL) override;
status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
@@ -114,6 +116,12 @@
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
uint32_t consumerUsage = 0) override;
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ uint32_t consumerUsage = 0) override;
+
status_t createInputStream(
uint32_t width, uint32_t height, int format,
int *id) override;
@@ -342,6 +350,7 @@
camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
mOutputStreams;
+ SurfaceMap mOutputSurfaces;
CaptureResultExtras mResultExtras;
// Used to cancel AE precapture trigger for devices doesn't support
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
@@ -360,11 +369,18 @@
status_t convertMetadataListToRequestListLocked(
const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
/*out*/
RequestList *requestList);
- status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+ void convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request);
+
+ status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
int64_t *lastFrameNumber = NULL);
@@ -436,13 +452,15 @@
* Do common work for setting up a streaming or single capture request.
* On success, will transition to ACTIVE if in IDLE.
*/
- sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+ sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Build a CaptureRequest request from the CameraDeviceBase request
* settings.
*/
- sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+ sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Take the currently-defined set of streams and configure the HAL to use
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..7f61c7a 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -83,6 +83,14 @@
return OK;
}
+status_t Camera3DummyStream::notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids) {
+ (void) frame_number;
+ (void) surface_ids;
+ // Do nothing
+ return OK;
+}
+
status_t Camera3DummyStream::configureQueueLocked() {
// Do nothing
return OK;
@@ -103,7 +111,7 @@
return false;
}
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
return false;
}
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..37efbbb 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -56,6 +56,9 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
/**
* Return if this output stream is for video encoding.
*/
@@ -64,7 +67,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
* Set the consumer surface to the output stream.
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..1e76a27 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -124,6 +124,7 @@
int format,
android_dataspace dataSpace,
camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage, nsecs_t timestampOffset,
int setId) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
@@ -132,7 +133,8 @@
mTraceFirstBuffer(true),
mUseMonoTimestamp(false),
mUseBufferManager(false),
- mConsumerUsage(0) {
+ mTimestampOffset(timestampOffset),
+ mConsumerUsage(consumerUsage) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
mBufferReleasedListener = new BufferReleasedListener(this);
@@ -373,6 +375,24 @@
return res;
}
+ if ((res = configureConsumerQueueLocked()) != OK) {
+ return res;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need to skip these cases, as the timeout would disable the non-blocking (async) mode.
+ if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+ mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
+ return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+ status_t res;
+
+ mTraceFirstBuffer = true;
+
ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
// Configure consumer-side ANativeWindow interface. The listener may be used
@@ -470,12 +490,7 @@
if (res != OK) {
ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
__FUNCTION__, mTransform, strerror(-res), res);
- }
-
- // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
- // We need skip these cases as timeout will disable the non-blocking (async) mode.
- if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
- mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ return res;
}
/**
@@ -568,14 +583,24 @@
status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
status_t res;
- int32_t u = 0;
+
if (mConsumer == nullptr) {
// mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
*usage = mConsumerUsage;
return OK;
}
- res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+ res = getEndpointUsageForSurface(usage, mConsumer);
+
+ return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const {
+ status_t res;
+ int32_t u = 0;
+
+ res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
// If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +612,8 @@
// 3. GRALLOC_USAGE_HW_COMPOSER
// 4. GRALLOC_USAGE_HW_VIDEO_ENCODER
if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
- (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
- GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+ (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+ GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
}
@@ -676,8 +701,17 @@
return OK;
}
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+status_t Camera3OutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+ const std::vector<size_t>& /*surface_ids*/) {
+ return OK;
+}
+
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
Mutex::Autolock l(mLock);
+
+ if (surface_id != 0) {
+ ALOGE("%s: surface_id for Camera3OutputStream should be 0!", __FUNCTION__);
+ }
return mConsumer == nullptr;
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..26ea63f 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -135,7 +135,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
* Set the consumer surface to the output stream.
@@ -158,6 +158,9 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
/**
* Set the graphic buffer manager to get/return the stream buffers.
*
@@ -169,6 +172,7 @@
Camera3OutputStream(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
@@ -183,12 +187,19 @@
virtual status_t disconnectLocked();
+ status_t getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const;
+ status_t configureConsumerQueueLocked();
+
+ // Consumer surface that receives the camera HAL output
sp<Surface> mConsumer;
- private:
+ uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
static const nsecs_t kDequeueBufferTimeout = 1000000000; // 1 sec
+ private:
+
int mTransform;
virtual status_t setTransformLocked(int transform);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..6a911c6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,7 +43,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const = 0;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
/**
* Set the consumer surface to the output stream.
@@ -59,6 +59,20 @@
*
*/
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+ /**
+ * Notify which surfaces are requested for a particular frame number.
+ *
+ * Multiple surfaces can share the same output stream, but a request may
+ * target only a subset of those surfaces. In this case, the
+ * Camera3OutputStreamInterface object needs to manage the output surfaces on
+ * a per-request basis.
+ *
+ * If there is only one surface for this output stream, calling this
+ * function is a no-op.
+ */
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids) = 0;
};
} // namespace camera3
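
A hedged caller-side sketch of the new notifyRequestedSurfaces() hook (the frame number and surface indices below are examples only): for each output buffer of a request, the request thread passes the frame number and the surface indices that request targets.

    std::vector<size_t> requestedSurfaces = {0, 2};   // hypothetical indices
    status_t res = outputStream->notifyRequestedSurfaces(/*frame_number*/ 42,
            requestedSurfaces);

A plain Camera3OutputStream treats the call as a no-op; Camera3SharedOutputStream forwards it to its Camera3StreamSplitter so the splitter knows where to route the buffer the HAL queues for that frame.
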
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..b419e06
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+ const std::vector<sp<Surface>>& surfaces,
+ bool hasDeferredSurface,
+ uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation,
+ nsecs_t timestampOffset, int setId) :
+ Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+ format, dataSpace, rotation, consumerUsage,
+ timestampOffset, setId),
+ mSurfaces(surfaces),
+ mDeferred(hasDeferredSurface) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+ disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+ status_t res = OK;
+
+ mStreamSplitter = new Camera3StreamSplitter();
+
+ uint32_t usage;
+ getEndpointUsage(&usage);
+
+ res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, mConsumer);
+ if (res != OK) {
+ ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+ const std::vector<size_t>& surface_ids) {
+ Mutex::Autolock l(mLock);
+ status_t res = OK;
+
+ if (mStreamSplitter != nullptr) {
+ res = mStreamSplitter->notifyRequestedSurfaces(surface_ids);
+ }
+
+ return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+ Mutex::Autolock l(mLock);
+ return (mDeferred && surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumer(sp<Surface> surface) {
+ if (surface == nullptr) {
+ ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ if (!mDeferred) {
+ ALOGE("%s: Current stream isn't deferred!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mSurfaces.push_back(surface);
+
+ return mStreamSplitter->addOutput(surface, camera3_stream::max_buffers);
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+ return res;
+ }
+
+ res = connectStreamSplitterLocked();
+ if (res != OK) {
+ ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ res = configureConsumerQueueLocked();
+ if (res != OK) {
+ ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+ status_t res;
+ res = Camera3OutputStream::disconnectLocked();
+
+ if (mStreamSplitter != nullptr) {
+ mStreamSplitter->disconnect();
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+ status_t res;
+ uint32_t u = 0;
+
+ if (mConsumer == nullptr) {
+ // Called before shared buffer queue is constructed.
+ *usage = getPresetConsumerUsage();
+
+ for (auto surface : mSurfaces) {
+ if (surface != nullptr) {
+ res = getEndpointUsageForSurface(&u, surface);
+ *usage |= u;
+ }
+ }
+ } else {
+ // Called after shared buffer queue is constructed.
+ res = getEndpointUsageForSurface(&u, mConsumer);
+ *usage |= u;
+ }
+
+ return res;
+}
+
+} // namespace camera3
+
+} // namespace android
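
As a rough illustration of getEndpointUsage() above (the surfaces and their usage bits are hypothetical): before the splitter's input queue exists, the shared stream advertises the union of every attached surface's consumer usage plus any preset usage, so the HAL allocates buffers usable by all consumers.

    uint32_t usage = 0;                       // preset usage from a deferred-surface hint, often 0
    usage |= GRALLOC_USAGE_HW_TEXTURE;        // e.g. a GPU-composited preview surface
    usage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;  // e.g. a MediaCodec recording surface
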
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..1b37d7c
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+ public Camera3OutputStream {
+public:
+ /**
+ * Set up a stream for formats that have 2 dimensions, with multiple
+ * surfaces. A valid stream set id needs to be set to support buffer
+ * sharing between multiple streams.
+ */
+ Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+ bool hasDeferredSurface, uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+ virtual ~Camera3SharedOutputStream();
+
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+ virtual status_t setConsumer(sp<Surface> consumer);
+
+private:
+ // Surfaces passed in constructor from app
+ std::vector<sp<Surface> > mSurfaces;
+
+ /**
+ * The Camera3StreamSplitter object this stream uses for stream
+ * sharing.
+ */
+ sp<Camera3StreamSplitter> mStreamSplitter;
+
+ /**
+ * Initialize stream splitter.
+ */
+ status_t connectStreamSplitterLocked();
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t disconnectLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+ bool mDeferred;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..b935141
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t hal_max_buffers,
+ sp<Surface>& consumer) {
+ if (consumer != nullptr) {
+ ALOGE("%s: output Surface is not NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+ status_t res = OK;
+
+ if (mOutputs.size() > 0 || mConsumer != nullptr) {
+ ALOGE("%s: StreamSplitter already connected", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ // Add output surfaces. This has to happen before creating the internal
+ // buffer queue in order to determine the max consumer-side buffer count.
+ for (size_t i = 0; i < surfaces.size(); i++) {
+ if (surfaces[i] != nullptr) {
+ res = addOutputLocked(surfaces[i], hal_max_buffers,
+ OutputType::NonDeferred);
+ if (res != OK) {
+ ALOGE("%s: Failed to add output surface: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+ }
+
+ // Create buffer queue for input
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+ mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+ mMaxConsumerBuffers);
+ if (mBufferItemConsumer == nullptr) {
+ return NO_MEMORY;
+ }
+ mConsumer->setConsumerName(getUniqueConsumerName());
+
+ mSurface = new Surface(mProducer);
+ if (mSurface == nullptr) {
+ return NO_MEMORY;
+ }
+ consumer = mSurface;
+
+ res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+ return res;
+}
+
+void Camera3StreamSplitter::disconnect() {
+ Mutex::Autolock lock(mMutex);
+
+ for (auto& output : mOutputs) {
+ output->disconnect(NATIVE_WINDOW_API_CAMERA);
+ }
+ mOutputs.clear();
+
+ if (mConsumer != nullptr) {
+ mConsumer->consumerDisconnect();
+ mConsumer.clear();
+ }
+
+ if (mBuffers.size() > 0) {
+ ALOGI("%zu buffers still being tracked", mBuffers.size());
+ }
+}
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+ disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(
+ sp<Surface>& outputQueue, size_t hal_max_buffers) {
+ Mutex::Autolock lock(mMutex);
+ return addOutputLocked(outputQueue, hal_max_buffers, OutputType::Deferred);
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(
+ const sp<Surface>& outputQueue, size_t hal_max_buffers,
+ OutputType outputType) {
+ if (outputQueue == nullptr) {
+ ALOGE("addOutput: outputQueue must not be NULL");
+ return BAD_VALUE;
+ }
+ if (hal_max_buffers < 1) {
+ ALOGE("%s: Camera HAL requested max_buffer count: %zu, requires at least 1",
+ __FUNCTION__, hal_max_buffers);
+ return BAD_VALUE;
+ }
+
+ sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+ // Connect to the buffer producer
+ IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+ sp<OutputListener> listener(new OutputListener(this, gbp));
+ IInterface::asBinder(gbp)->linkToDeath(listener);
+ status_t status = gbp->connect(listener, NATIVE_WINDOW_API_CAMERA,
+ /* producerControlledByApp */ true, &queueBufferOutput);
+ if (status != NO_ERROR) {
+ ALOGE("addOutput: failed to connect (%d)", status);
+ return status;
+ }
+
+ // Query consumer side buffer count, and update overall buffer count
+ int maxConsumerBuffers = 0;
+ status = static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+ if (status != OK) {
+ ALOGE("%s: Unable to query consumer undequeued buffer count"
+ " for surface", __FUNCTION__);
+ return status;
+ }
+
+ if (maxConsumerBuffers > mMaxConsumerBuffers) {
+ if (outputType == OutputType::Deferred) {
+ ALOGE("%s: Fatal: Deferred surface has higher consumer buffer count"
+ " %d than what's already configured %d", __FUNCTION__,
+ maxConsumerBuffers, mMaxConsumerBuffers);
+ return BAD_VALUE;
+ }
+ mMaxConsumerBuffers = maxConsumerBuffers;
+ }
+
+ ALOGV("%s: Consumer wants %d buffers, HAL wants %zu", __FUNCTION__,
+ maxConsumerBuffers, hal_max_buffers);
+ size_t totalBufferCount = maxConsumerBuffers + hal_max_buffers;
+ status = native_window_set_buffer_count(outputQueue.get(),
+ totalBufferCount);
+ if (status != OK) {
+ ALOGE("%s: Unable to set buffer count for surface %p",
+ __FUNCTION__, outputQueue.get());
+ return status;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need to skip these cases, as the timeout would disable the non-blocking (async) mode.
+ int32_t usage = 0;
+ static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+ if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+ outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
+ status = gbp->allowAllocation(false);
+ if (status != OK) {
+ ALOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+ return status;
+ }
+
+ // Add new entry into mOutputs
+ mOutputs.push_back(gbp);
+ return NO_ERROR;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+ static volatile int32_t counter = 0;
+ return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyRequestedSurfaces(
+ const std::vector<size_t>& surfaces) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ mRequestedSurfaces.push_back(surfaces);
+ return OK;
+}
+
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /* item */) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ // The current policy is that if any one consumer is consuming buffers too
+ // slowly, the splitter will stall the rest of the outputs by not acquiring
+ // any more buffers from the input. This will cause back pressure on the
+ // input queue, slowing down its producer.
+
+ // If there are too many outstanding buffers, we block until a buffer is
+ // released back to the input in onBufferReleased
+ while (mOutstandingBuffers >= mMaxConsumerBuffers) {
+ mReleaseCondition.wait(mMutex);
+
+ // If the splitter is abandoned while we are waiting, the release
+ // condition variable will be broadcast, and we should just return
+ // without attempting to do anything more (since the input queue will
+ // also be abandoned).
+ if (mIsAbandoned) {
+ return;
+ }
+ }
+ // If the splitter is abandoned without reaching mMaxConsumerBuffers, just
+ // return without attempting to do anything more.
+ if (mIsAbandoned) {
+ return;
+ }
+
+ ++mOutstandingBuffers;
+
+ // Acquire and detach the buffer from the input
+ BufferItem bufferItem;
+ status_t status = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "acquiring buffer from input failed (%d)", status);
+
+ ALOGV("acquired buffer %#" PRIx64 " from input",
+ bufferItem.mGraphicBuffer->getId());
+
+ status = mConsumer->detachBuffer(bufferItem.mSlot);
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "detaching buffer from input failed (%d)", status);
+
+ IGraphicBufferProducer::QueueBufferInput queueInput(
+ bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+ bufferItem.mDataSpace, bufferItem.mCrop,
+ static_cast<int32_t>(bufferItem.mScalingMode),
+ bufferItem.mTransform, bufferItem.mFence);
+
+ // Attach and queue the buffer to each of the outputs
+ std::vector<std::vector<size_t> >::iterator surfaces = mRequestedSurfaces.begin();
+ if (surfaces != mRequestedSurfaces.end()) {
+
+ LOG_ALWAYS_FATAL_IF(surfaces->size() == 0,
+ "requested surface ids shouldn't be empty");
+
+ // Initialize our reference count for this buffer
+ mBuffers[bufferItem.mGraphicBuffer->getId()] =
+ std::unique_ptr<BufferTracker>(
+ new BufferTracker(bufferItem.mGraphicBuffer, surfaces->size()));
+
+ for (auto id : *surfaces) {
+
+ LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+ "requested surface id exceeding max registered ids");
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ status = mOutputs[id]->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note
+ // that, decrement the reference count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else if (status == WOULD_BLOCK) {
+ // If the output is async, attachBuffer may return WOULD_BLOCK
+ // indicating number of dequeued buffers has reached limit. In
+ // this case, simply decrement the reference count, and move on
+ // to the next output.
+ // TODO: Do we need to report BUFFER_ERROR for this result?
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else if (status == TIMED_OUT) {
+ // If attachBuffer times out due to the value set by
+ // setDequeueTimeout, simply decrement the reference count, and
+ // move on to the next output.
+ // TODO: Do we need to report BUFFER_ERROR for this result?
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "attaching buffer to output failed (%d)", status);
+ }
+
+ IGraphicBufferProducer::QueueBufferOutput queueOutput;
+ status = mOutputs[id]->queueBuffer(slot, queueInput, &queueOutput);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note
+ // that, decrement the reference count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "queueing buffer to output failed (%d)", status);
+ }
+
+ ALOGV("queued buffer %#" PRIx64 " to output %p",
+ bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
+ }
+
+ mRequestedSurfaces.erase(surfaces);
+ }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ sp<GraphicBuffer> buffer;
+ sp<Fence> fence;
+ status_t status = from->detachNextBuffer(&buffer, &fence);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note that,
+ // but we can't do anything else, since buffer is invalid
+ onAbandonedLocked();
+ return;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "detaching buffer from output failed (%d)", status);
+ }
+
+ ALOGV("detached buffer %#" PRIx64 " from output %p",
+ buffer->getId(), from.get());
+
+ BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+
+ // Merge the release fence of the incoming buffer so that the fence we send
+ // back to the input includes all of the outputs' fences
+ tracker.mergeFence(fence);
+
+ // Check to see if this is the last outstanding reference to this buffer
+ size_t referenceCount = tracker.decrementReferenceCountLocked();
+ ALOGV("buffer %#" PRIx64 " reference count %zu", buffer->getId(),
+ referenceCount);
+ if (referenceCount > 0) {
+ return;
+ }
+
+ // If we've been abandoned, we can't return the buffer to the input, so just
+ // stop tracking it and move on
+ if (mIsAbandoned) {
+ mBuffers.erase(buffer->getId());
+ return;
+ }
+
+ // Attach and release the buffer back to the input
+ int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+ status = mConsumer->attachBuffer(&consumerSlot, tracker.getBuffer());
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "attaching buffer to input failed (%d)", status);
+
+ status = mConsumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker.getMergedFence());
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "releasing buffer to input failed (%d)", status);
+
+ ALOGV("released buffer %#" PRIx64 " to input", buffer->getId());
+
+ // We no longer need to track the buffer once it has been returned to the
+ // input
+ mBuffers.erase(buffer->getId());
+
+ // Notify any waiting onFrameAvailable calls
+ --mOutstandingBuffers;
+ mReleaseCondition.signal();
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+ ALOGE("one of my outputs has abandoned me");
+ if (!mIsAbandoned && mConsumer != nullptr) {
+ mConsumer->consumerDisconnect();
+ }
+ mIsAbandoned = true;
+ mReleaseCondition.broadcast();
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+ wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output)
+ : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ sp<IGraphicBufferProducer> output = mOutput.promote();
+ if (splitter != nullptr && output != nullptr) {
+ splitter->onBufferReleasedByOutput(output);
+ }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ if (splitter != nullptr) {
+ Mutex::Autolock lock(splitter->mMutex);
+ splitter->onAbandonedLocked();
+ }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+ const sp<GraphicBuffer>& buffer, size_t referenceCount)
+ : mBuffer(buffer), mMergedFence(Fence::NO_FENCE),
+ mReferenceCount(referenceCount) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+ mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+ if (mReferenceCount > 0)
+ --mReferenceCount;
+ return mReferenceCount;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..5a25712
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+ // Constructor
+ Camera3StreamSplitter() = default;
+
+ // Connect to the stream splitter by creating buffer queue and connecting it
+ // with output surfaces.
+ status_t connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t hal_max_buffers,
+ sp<Surface>& consumer);
+
+ // addOutput adds an output BufferQueue to the splitter. The splitter
+ // connects to outputQueue as a CPU producer, and any buffers queued
+ // to the input will be queued to each output. It is assumed that all of the
+ // outputs are added before any buffers are queued on the input. If any
+ // output is abandoned by its consumer, the splitter will abandon its input
+ // queue (see onAbandoned).
+ //
+ // A return value other than NO_ERROR means that an error has occurred and
+ // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+ // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+ // of other error codes.
+ status_t addOutput(sp<Surface>& outputQueue, size_t hal_max_buffers);
+
+ // Notify the splitter of the surfaces requested for a particular frame
+ // number. The requested surface ids are stored in a FIFO queue; when a
+ // buffer becomes available from the input queue, they determine which
+ // outputs the buffer is sent to.
+ status_t notifyRequestedSurfaces(const std::vector<size_t>& surfaces);
+
+ // Disconnect the buffer queue from output surfaces.
+ void disconnect();
+
+private:
+ // From IConsumerListener
+ //
+ // During this callback, we store some tracking information, detach the
+ // buffer from the input, and attach it to each of the outputs. This call
+ // can block if there are too many outstanding buffers. If it blocks, it
+ // will resume when onBufferReleasedByOutput releases a buffer back to the
+ // input.
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // From IConsumerListener
+ // We don't care about released buffers because we detach each buffer as
+ // soon as we acquire it. See the comment for onBufferReleased below for
+ // some clarifying notes about the name.
+ void onBuffersReleased() override {}
+
+ // From IConsumerListener
+ // We don't care about sideband streams, since we won't be splitting them
+ void onSidebandStreamChanged() override {}
+
+ // This is the implementation of the onBufferReleased callback from
+ // IProducerListener. It gets called from an OutputListener (see below), and
+ // 'from' is which producer interface from which the callback was received.
+ //
+ // During this callback, we detach the buffer from the output queue that
+ // generated the callback, update our state tracking to see if this is the
+ // last output releasing the buffer, and if so, release it to the input.
+ // If we release the buffer to the input, we allow a blocked
+ // onFrameAvailable call to proceed.
+ void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+ // When this is called, the splitter disconnects from (i.e., abandons) its
+ // input queue and signals any waiting onFrameAvailable calls to wake up.
+ // It still processes callbacks from other outputs, but only detaches their
+ // buffers so they can continue operating until they run out of buffers to
+ // acquire. This must be called with mMutex locked.
+ void onAbandonedLocked();
+
+ // This is a thin wrapper class that lets us determine which BufferQueue
+ // the IProducerListener::onBufferReleased callback is associated with. We
+ // create one of these per output BufferQueue, and then pass the producer
+ // into onBufferReleasedByOutput above.
+ class OutputListener : public BnProducerListener,
+ public IBinder::DeathRecipient {
+ public:
+ OutputListener(wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output);
+ virtual ~OutputListener() = default;
+
+ // From IProducerListener
+ void onBufferReleased() override;
+
+ // From IBinder::DeathRecipient
+ void binderDied(const wp<IBinder>& who) override;
+
+ private:
+ wp<Camera3StreamSplitter> mSplitter;
+ wp<IGraphicBufferProducer> mOutput;
+ };
+
+ class BufferTracker {
+ public:
+ BufferTracker(const sp<GraphicBuffer>& buffer, size_t referenceCount);
+ ~BufferTracker() = default;
+
+ const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+ const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+ void mergeFence(const sp<Fence>& with);
+
+ // Returns the new value
+ // Only called while mMutex is held
+ size_t decrementReferenceCountLocked();
+
+ private:
+
+ // Disallow copying
+ BufferTracker(const BufferTracker& other);
+ BufferTracker& operator=(const BufferTracker& other);
+
+ sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+ sp<Fence> mMergedFence;
+ size_t mReferenceCount;
+ };
+
+ // A deferred output is an output added to the splitter after the connect()
+ // call, whereas a non-deferred output is added within connect().
+ enum class OutputType { NonDeferred, Deferred };
+
+ // Must be accessed through RefBase
+ virtual ~Camera3StreamSplitter();
+
+ status_t addOutputLocked(const sp<Surface>& outputQueue,
+ size_t hal_max_buffers, OutputType outputType);
+
+ // Get unique name for the buffer queue consumer
+ static String8 getUniqueConsumerName();
+
+ // Max consumer side buffers for deferred surface. This will be used as a
+ // lower bound for overall consumer side max buffers.
+ static const int MAX_BUFFERS_DEFERRED_OUTPUT = 2;
+ int mMaxConsumerBuffers = MAX_BUFFERS_DEFERRED_OUTPUT;
+
+ static const nsecs_t kDequeueBufferTimeout = s2ns(1); // 1 sec
+
+ // mIsAbandoned is set to true when an output dies. Once the Camera3StreamSplitter
+ // has been abandoned, it will continue to detach buffers from other
+ // outputs, but it will disconnect from the input and not attempt to
+ // communicate with it further.
+ bool mIsAbandoned = false;
+
+ Mutex mMutex;
+ Condition mReleaseCondition;
+ int mOutstandingBuffers = 0;
+
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
+ sp<Surface> mSurface;
+
+ std::vector<sp<IGraphicBufferProducer> > mOutputs;
+ // FIFO of requested surface ids, one entry per request: which outputs the
+ // corresponding input buffer should be attached and queued to.
+ std::vector<std::vector<size_t> > mRequestedSurfaces;
+
+ // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+ // objects (which are mostly for counting how many outputs have released the
+ // buffer, but also contain merged release fences).
+ std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+};
+
+} // namespace android
+
+#endif
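
For orientation, a hedged end-to-end sketch of how Camera3SharedOutputStream is expected to drive the splitter (surface0/surface1/surface2, usage, and halMaxBuffers are hypothetical; error handling omitted):

    sp<Camera3StreamSplitter> splitter = new Camera3StreamSplitter();
    sp<Surface> halSurface;  // filled in by connect(); becomes the stream's mConsumer

    // Configuration time: register the app surfaces and create the input queue.
    splitter->connect({surface0, surface1}, usage, halMaxBuffers, halSurface);

    // Per capture request: record which outputs the next HAL buffer should reach.
    splitter->notifyRequestedSurfaces({0});   // e.g. this frame goes to surface0 only

    // A deferred consumer attached after configuration.
    splitter->addOutput(surface2, halMaxBuffers);

    splitter->disconnect();

Each buffer the HAL queues onto halSurface is acquired, detached, and then attached and queued to the requested outputs; it returns to the input queue only after every one of those outputs has released it, as tracked by BufferTracker's reference count.
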