camera2: Add support for secondary surface for stream
- Enhance OutputConfiguration to contain multiple surfaces for one
underlying stream.
- Create Camera3SharedOutputStream to handle streams with multiple
surfaces.
- Create Camera3StreamSplitter to handle buffer flows between camera and
multiple consumers.
Test: cts, and manually test camera preview/snapshot/recording
Bug: 33777818
Change-Id: Ia010c3cc9d9b4bd5b9ea03cc42fe4e0a0d8033f1
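A minimal sketch of the intended service-side flow (illustrative surfaces and stream parameters only, not code from this change): every non-null producer carried by one OutputConfiguration becomes a Surface, and all of them are handed to the new Camera3Device::createStream overload; with more than one consumer, the device builds a Camera3SharedOutputStream, which fans buffers out through a Camera3StreamSplitter.

    // previewSurface/recordingSurface are placeholders for the two consumers the
    // client packed into a single OutputConfiguration.
    std::vector<sp<Surface>> consumers = { previewSurface, recordingSurface };
    int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
    status_t err = device->createStream(consumers, /*hasDeferredConsumer*/ false,
            /*width*/ 1920, /*height*/ 1080, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &streamId);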
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 12d0da8..1427e29 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -30,8 +30,9 @@
const int OutputConfiguration::INVALID_ROTATION = -1;
const int OutputConfiguration::INVALID_SET_ID = -1;
-sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
- return mGbp;
+const std::vector<sp<IGraphicBufferProducer>>&
+ OutputConfiguration::getGraphicBufferProducers() const {
+ return mGbps;
}
int OutputConfiguration::getRotation() const {
@@ -103,37 +104,60 @@
return err;
}
- view::Surface surfaceShim;
- if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
- // Read surface failure for deferred surface configuration is expected.
- if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
- surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
- ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
- __FUNCTION__, width, height);
- err = OK;
- } else {
- ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
- return err;
- }
+ // numSurfaces is the total number of surfaces for this OutputConfiguration,
+ // regardless of whether the surfaces are deferred or not.
+ int numSurfaces = 0;
+ if ((err = parcel->readInt32(&numSurfaces)) != OK) {
+ ALOGE("%s: Failed to read maxSurfaces from parcel", __FUNCTION__);
+ return err;
+ }
+ if (numSurfaces < 1) {
+ ALOGE("%s: there has to be at least 1 surface per"
+ " outputConfiguration", __FUNCTION__);
+ return BAD_VALUE;
}
- mGbp = surfaceShim.graphicBufferProducer;
+ // Read all surfaces from the parcel. If a surface is deferred, readFromParcel
+ // returns an error, and a null surface is put into mGbps. We assume all
+ // deferred surfaces are after non-deferred surfaces in the parcel.
+ // TODO: Need better way to detect deferred surface than using error
+ // return from readFromParcel.
+ std::vector<sp<IGraphicBufferProducer>> gbps;
+ for (int i = 0; i < numSurfaces; i++) {
+ view::Surface surfaceShim;
+ if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+ // Read surface failure for deferred surface configuration is expected.
+ if ((surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
+ surfaceType == SURFACE_TYPE_SURFACE_TEXTURE)) {
+ ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
+ __FUNCTION__, width, height);
+ err = OK;
+ } else {
+ ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+ return err;
+ }
+ }
+ gbps.push_back(surfaceShim.graphicBufferProducer);
+ ALOGV("%s: OutputConfiguration: gbps[%d] : %p, name %s", __FUNCTION__,
+ i, gbps[i].get(), String8(surfaceShim.name).string());
+ }
+
mRotation = rotation;
mSurfaceSetID = setID;
mSurfaceType = surfaceType;
mWidth = width;
mHeight = height;
+ mGbps = std::move(gbps);
- ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
- "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
- mRotation, mSurfaceSetID, mSurfaceType);
+ ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
+ __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
return err;
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
int surfaceSetID) {
- mGbp = gbp;
+ mGbps.push_back(gbp);
mRotation = rotation;
mSurfaceSetID = surfaceSetID;
}
@@ -158,14 +182,53 @@
err = parcel->writeInt32(mHeight);
if (err != OK) return err;
- view::Surface surfaceShim;
- surfaceShim.name = String16("unknown_name"); // name of surface
- surfaceShim.graphicBufferProducer = mGbp;
-
- err = surfaceShim.writeToParcel(parcel);
+ int numSurfaces = mGbps.size();
+ err = parcel->writeInt32(numSurfaces);
if (err != OK) return err;
+ for (int i = 0; i < numSurfaces; i++) {
+ view::Surface surfaceShim;
+ surfaceShim.name = String16("unknown_name"); // name of surface
+ surfaceShim.graphicBufferProducer = mGbps[i];
+
+ err = surfaceShim.writeToParcel(parcel);
+ if (err != OK) return err;
+ }
+
return OK;
}
+bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
+ const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+ other.getGraphicBufferProducers();
+
+ if (mGbps.size() != otherGbps.size()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < mGbps.size(); i++) {
+ if (mGbps[i] != otherGbps[i]) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
+ const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+ other.getGraphicBufferProducers();
+
+ if (mGbps.size() != otherGbps.size()) {
+ return mGbps.size() < otherGbps.size();
+ }
+
+ for (size_t i = 0; i < mGbps.size(); i++) {
+ if (mGbps[i] != otherGbps[i]) {
+ return mGbps[i] < otherGbps[i];
+ }
+ }
+
+ return false;
+}
}; // namespace android
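gbpsEqual() and gbpsLessThan() above compare the producer lists by size first and then element-wise, so the header's operator< still yields a usable ordering when a configuration carries several producers. A hedged sketch of why that matters, assuming two already-populated configurations configA and configB (placeholders) and #include <set>:

    // Sketch only: OutputConfiguration can serve as a key in ordered containers
    // because operator< falls back to gbpsLessThan() for the final tie-break.
    std::set<OutputConfiguration> uniqueConfigs;
    uniqueConfigs.insert(configA);
    uniqueConfigs.insert(configB);
    bool alreadyRequested = (uniqueConfigs.count(configA) > 0);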
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index cb04c0e..2961e2a 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -38,7 +38,7 @@
SURFACE_TYPE_SURFACE_VIEW = 0,
SURFACE_TYPE_SURFACE_TEXTURE = 1
};
- sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+ const std::vector<sp<IGraphicBufferProducer>>& getGraphicBufferProducers() const;
int getRotation() const;
int getSurfaceSetID() const;
int getSurfaceType() const;
@@ -65,19 +65,18 @@
int surfaceSetID = INVALID_SET_ID);
bool operator == (const OutputConfiguration& other) const {
- return (mGbp == other.mGbp &&
- mRotation == other.mRotation &&
+ return (mRotation == other.mRotation &&
mSurfaceSetID == other.mSurfaceSetID &&
mSurfaceType == other.mSurfaceType &&
mWidth == other.mWidth &&
- mHeight == other.mHeight);
+ mHeight == other.mHeight &&
+ gbpsEqual(other));
}
bool operator != (const OutputConfiguration& other) const {
return !(*this == other);
}
bool operator < (const OutputConfiguration& other) const {
if (*this == other) return false;
- if (mGbp != other.mGbp) return mGbp < other.mGbp;
if (mSurfaceSetID != other.mSurfaceSetID) {
return mSurfaceSetID < other.mSurfaceSetID;
}
@@ -90,15 +89,20 @@
if (mHeight != other.mHeight) {
return mHeight < other.mHeight;
}
+ if (mRotation != other.mRotation) {
+ return mRotation < other.mRotation;
+ }
- return mRotation < other.mRotation;
+ return gbpsLessThan(other);
}
bool operator > (const OutputConfiguration& other) const {
return (*this != other && !(*this < other));
}
+ bool gbpsEqual(const OutputConfiguration& other) const;
+ bool gbpsLessThan(const OutputConfiguration& other) const;
private:
- sp<IGraphicBufferProducer> mGbp;
+ std::vector<sp<IGraphicBufferProducer>> mGbps;
int mRotation;
int mSurfaceSetID;
int mSurfaceType;
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d1edb56..5b4d10d 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -48,8 +48,10 @@
device3/Camera3OutputStream.cpp \
device3/Camera3ZslStream.cpp \
device3/Camera3DummyStream.cpp \
+ device3/Camera3SharedOutputStream.cpp \
device3/StatusTracker.cpp \
device3/Camera3BufferManager.cpp \
+ device3/Camera3StreamSplitter.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
utils/AutoConditionLock.cpp \
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index d490119..a55c23b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -152,6 +152,7 @@
}
List<const CameraMetadata> metadataRequestList;
+ std::list<const SurfaceMap> surfaceMapList;
submitInfo->mRequestId = mRequestIdCounter;
uint32_t loopCounter = 0;
@@ -191,11 +192,11 @@
}
/**
- * Write in the output stream IDs which we calculate from
- * the capture request's list of surface targets
+ * Write in the output stream IDs and the map from stream ID to surface IDs,
+ * which we calculate from the capture request's list of surface targets
*/
+ SurfaceMap surfaceMap;
Vector<int32_t> outputStreamIds;
- outputStreamIds.setCapacity(request.mSurfaceList.size());
for (sp<Surface> surface : request.mSurfaceList) {
if (surface == 0) continue;
@@ -211,10 +212,16 @@
"Request targets Surface that is not part of current capture session");
}
- int streamId = mStreamMap.valueAt(idx);
- outputStreamIds.push_back(streamId);
- ALOGV("%s: Camera %s: Appending output stream %d to request",
- __FUNCTION__, mCameraIdStr.string(), streamId);
+ const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+ if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
+ surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
+ outputStreamIds.push_back(streamSurfaceId.streamId());
+ }
+ surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+ ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+ __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+ streamSurfaceId.surfaceId());
}
metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
@@ -231,11 +238,13 @@
loopCounter, requests.size());
metadataRequestList.push_back(metadata);
+ surfaceMapList.push_back(surfaceMap);
}
mRequestIdCounter++;
if (streaming) {
- err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
+ &(submitInfo->mLastFrameNumber));
if (err != OK) {
String8 msg = String8::format(
"Camera %s: Got error %s (%d) after trying to set streaming request",
@@ -248,7 +257,8 @@
mStreamingRequestId = submitInfo->mRequestId;
}
} else {
- err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ err = mDevice->captureList(metadataRequestList, surfaceMapList,
+ &(submitInfo->mLastFrameNumber));
if (err != OK) {
String8 msg = String8::format(
"Camera %s: Got error %s (%d) after trying to submit capture request",
@@ -312,8 +322,9 @@
}
binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
- ALOGV("%s: ending configure (%d input stream, %zu output streams)",
- __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+ ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
+ __FUNCTION__, mInputStream.configured ? 1 : 0,
+ mStreamMap.size());
binder::Status res;
if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -376,7 +387,7 @@
}
bool isInput = false;
- ssize_t index = NAME_NOT_FOUND;
+ std::vector<sp<IBinder>> surfaces;
ssize_t dIndex = NAME_NOT_FOUND;
if (mInputStream.configured && mInputStream.id == streamId) {
@@ -384,26 +395,24 @@
} else {
// Guard against trying to delete non-created streams
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
- index = i;
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
+ surfaces.push_back(mStreamMap.keyAt(i));
+ }
+ }
+
+ // See if this stream is one of the deferred streams.
+ for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+ if (streamId == mDeferredStreams[i]) {
+ dIndex = i;
break;
}
}
- if (index == NAME_NOT_FOUND) {
- // See if this stream is one of the deferred streams.
- for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
- if (streamId == mDeferredStreams[i]) {
- dIndex = i;
- break;
- }
- }
- if (dIndex == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
- " stream created yet", mCameraIdStr.string(), streamId);
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
+ if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
+ String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
+ " stream created yet", mCameraIdStr.string(), streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
}
@@ -418,10 +427,14 @@
} else {
if (isInput) {
mInputStream.configured = false;
- } else if (index != NAME_NOT_FOUND) {
- mStreamMap.removeItemsAt(index);
} else {
- mDeferredStreams.removeItemsAt(dIndex);
+ for (auto& surface : surfaces) {
+ mStreamMap.removeItem(surface);
+ }
+
+ if (dIndex != NAME_NOT_FOUND) {
+ mDeferredStreams.removeItemsAt(dIndex);
+ }
}
}
@@ -439,14 +452,39 @@
Mutex::Autolock icl(mBinderSerializationLock);
- sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
- bool deferredConsumer = bufferProducer == NULL;
+ const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+ outputConfiguration.getGraphicBufferProducers();
+ size_t numBufferProducers = bufferProducers.size();
+
+ if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+ ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+ __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+ }
+ if (numBufferProducers == 0) {
+ ALOGE("%s: GraphicBufferProducer count 0 is not valid", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Malformed surface");
+ }
+ size_t deferredConsumerCnt = 0;
+ for (auto bufferProducer : bufferProducers) {
+ if (bufferProducer == nullptr) {
+ deferredConsumerCnt++;
+ }
+ }
+ if (deferredConsumerCnt > MAX_DEFERRED_SURFACES) {
+ ALOGE("%s: %zu deferred consumer is not supported", __FUNCTION__, deferredConsumerCnt);
+ return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "More than %d deferred consumer", MAX_DEFERRED_SURFACES);
+ }
+ bool deferredConsumer = deferredConsumerCnt > 0;
+ bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 1;
int surfaceType = outputConfiguration.getSurfaceType();
bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
(surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
if (deferredConsumer && !validSurfaceType) {
ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
- __FUNCTION__, bufferProducer.get(), surfaceType);
+ __FUNCTION__, bufferProducers[0].get(), surfaceType);
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
}
@@ -454,103 +492,165 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
+ std::vector<sp<Surface>> surfaces;
+ std::vector<sp<IBinder>> binders;
+ int streamWidth, streamHeight, streamFormat;
int width, height, format;
+ int32_t streamConsumerUsage;
int32_t consumerUsage;
- android_dataspace dataSpace;
+ android_dataspace dataSpace, streamDataSpace;
status_t err;
// Create stream for deferred surface case.
- if (deferredConsumer) {
+ if (deferredConsumerOnly) {
return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
}
- // Don't create multiple streams for the same target surface
- {
- ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
- if (index != NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
- "(ID %zd)", mCameraIdStr.string(), index);
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+ bool isFirstSurface = true;
+ streamWidth = -1;
+ streamHeight = -1;
+ streamFormat = -1;
+ streamDataSpace = HAL_DATASPACE_UNKNOWN;
+ streamConsumerUsage = 0;
+
+ for (auto& bufferProducer : bufferProducers) {
+ if (bufferProducer == nullptr) {
+ continue;
}
- }
- // HACK b/10949105
- // Query consumer usage bits to set async operation mode for
- // GLConsumer using controlledByApp parameter.
- bool useAsync = false;
- if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
- &consumerUsage)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
- mCameraIdStr.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
- ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
- __FUNCTION__, mCameraIdStr.string(), consumerUsage);
- useAsync = true;
- }
+ // Don't create multiple streams for the same target surface
+ {
+ ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+ if (index != NAME_NOT_FOUND) {
+ String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
+ "(ID %zd)", mCameraIdStr.string(), index);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+ }
+ }
- int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_RENDERSCRIPT;
- int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
- GraphicBuffer::USAGE_HW_TEXTURE |
- GraphicBuffer::USAGE_HW_COMPOSER;
- bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
- (consumerUsage & allowedFlags) != 0;
+ // HACK b/10949105
+ // Query consumer usage bits to set async operation mode for
+ // GLConsumer using controlledByApp parameter.
+ bool useAsync = false;
+ if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ &consumerUsage)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+ ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
+ __FUNCTION__, mCameraIdStr.string(), consumerUsage);
+ useAsync = true;
+ }
- sp<IBinder> binder = IInterface::asBinder(bufferProducer);
- sp<Surface> surface = new Surface(bufferProducer, useAsync);
- ANativeWindow *anw = surface.get();
+ int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+ GRALLOC_USAGE_RENDERSCRIPT;
+ int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+ GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_HW_COMPOSER;
+ bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+ (consumerUsage & allowedFlags) != 0;
- if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
- mCameraIdStr.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
- mCameraIdStr.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
- mCameraIdStr.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
- reinterpret_cast<int*>(&dataSpace))) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
- mCameraIdStr.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
+ sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+ sp<Surface> surface = new Surface(bufferProducer, useAsync);
+ ANativeWindow *anw = surface.get();
- // FIXME: remove this override since the default format should be
- // IMPLEMENTATION_DEFINED. b/9487482
- if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
- format <= HAL_PIXEL_FORMAT_BGRA_8888) {
- ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
- __FUNCTION__, mCameraIdStr.string(), format);
- format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- }
+ if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+ reinterpret_cast<int*>(&dataSpace))) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
- // Round dimensions to the nearest dimensions available for this format
- if (flexibleConsumer && isPublicFormat(format) &&
- !CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
- String8 msg = String8::format("Camera %s: No supported stream configurations with "
- "format %#x defined, failed to create output stream", mCameraIdStr.string(), format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ // FIXME: remove this override since the default format should be
+ // IMPLEMENTATION_DEFINED. b/9487482
+ if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+ format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+ ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+ __FUNCTION__, mCameraIdStr.string(), format);
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+ // Round dimensions to the nearest dimensions available for this format
+ if (flexibleConsumer && isPublicFormat(format) &&
+ !CameraDeviceClient::roundBufferDimensionNearest(width, height,
+ format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+ String8 msg = String8::format("Camera %s: No supported stream configurations with "
+ "format %#x defined, failed to create output stream",
+ mCameraIdStr.string(), format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (isFirstSurface) {
+ streamWidth = width;
+ streamHeight = height;
+ streamFormat = format;
+ streamDataSpace = dataSpace;
+ streamConsumerUsage = consumerUsage;
+ isFirstSurface = false;
+ }
+ if (width != streamWidth) {
+ String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+ mCameraIdStr.string(), width, streamWidth);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (height != streamHeight) {
+ String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+ mCameraIdStr.string(), height, streamHeight);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != streamFormat) {
+ String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+ mCameraIdStr.string(), format, streamFormat);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (dataSpace != streamDataSpace) {
+ String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+ mCameraIdStr.string(), dataSpace, streamDataSpace);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ // On the native side, there isn't a way to check whether two surfaces come from
+ // the same surface class type. Use the usage flag to approximate the comparison.
+ // TODO: Support surfaces of different surface class types.
+ if (consumerUsage != streamConsumerUsage) {
+ String8 msg = String8::format(
+ "Camera %s:Surface usage flag doesn't match 0x%x vs 0x%x",
+ mCameraIdStr.string(), consumerUsage, streamConsumerUsage);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ binders.push_back(binder);
+ surfaces.push_back(surface);
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
- err = mDevice->createStream(surface, width, height, format, dataSpace,
+ err = mDevice->createStream(surfaces, deferredConsumer, width, height, format, dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
&streamId, outputConfiguration.getSurfaceSetID());
@@ -559,11 +659,15 @@
"Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
} else {
- mStreamMap.add(binder, streamId);
-
+ int i = 0;
+ for (auto& binder : binders) {
+ ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
+ __FUNCTION__, binder.get(), streamId, i);
+ mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+ }
ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
- " (%d x %d) with format 0x%x.",
- __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
+ " (%d x %d) with format 0x%x.",
+ __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
// Set transform flags to ensure preview to be rotated correctly.
res = setStreamTransformLocked(streamId);
@@ -600,7 +704,9 @@
consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
- err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+ std::vector<sp<Surface>> noSurface;
+ err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
+ height, format, dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
&streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
@@ -944,7 +1050,7 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
@@ -984,7 +1090,7 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
@@ -1032,7 +1138,7 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
@@ -1070,26 +1176,42 @@
Mutex::Autolock icl(mBinderSerializationLock);
- sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+ const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+ outputConfiguration.getGraphicBufferProducers();
// Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
- if (bufferProducer == NULL) {
- ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+ // It is also saved in the last entry of the graphicBufferProducer list.
+ if (bufferProducers.size() == 0) {
+ ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
}
- // Check if this stram id is one of the deferred streams
- ssize_t index = NAME_NOT_FOUND;
- for (size_t i = 0; i < mDeferredStreams.size(); i++) {
- if (streamId == mDeferredStreams[i]) {
- index = i;
- break;
- }
+
+ // Right now, only the first surface in the OutputConfiguration is allowed to
+ // be deferred, and all other surfaces are checked to be non-null on the
+ // Java side.
+ sp<IGraphicBufferProducer> bufferProducer = bufferProducers[0];
+ if (bufferProducer == nullptr) {
+ ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Target Surface is invalid");
}
- if (index == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
- "(ID %d)", mCameraIdStr.string(), streamId);
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+
+ // Check if this stream id is one of the deferred-only streams
+ ssize_t index = NAME_NOT_FOUND;
+ if (bufferProducers.size() == 1) {
+ for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+ if (streamId == mDeferredStreams[i]) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index == NAME_NOT_FOUND) {
+ String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
+ "(ID %d)", mCameraIdStr.string(), streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
}
if (!mDevice.get()) {
@@ -1116,8 +1238,12 @@
err = mDevice->setConsumerSurface(streamId, consumerSurface);
if (err == OK) {
sp<IBinder> binder = IInterface::asBinder(bufferProducer);
- mStreamMap.add(binder, streamId);
- mDeferredStreams.removeItemsAt(index);
+ ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+ binder.get(), streamId, bufferProducers.size()-1);
+ mStreamMap.add(binder, StreamSurfaceId(streamId, bufferProducers.size()-1));
+ if (index != NAME_NOT_FOUND) {
+ mDeferredStreams.removeItemsAt(index);
+ }
} else if (err == NO_INIT) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Camera %s: Deferred surface is invalid: %s (%d)",
@@ -1152,9 +1278,11 @@
result.append(" No input stream configured.\n");
}
if (!mStreamMap.isEmpty()) {
- result.append(" Current output stream IDs:\n");
+ result.append(" Current output stream/surface IDs:\n");
for (size_t i = 0; i < mStreamMap.size(); i++) {
- result.appendFormat(" Stream %d\n", mStreamMap.valueAt(i));
+ result.appendFormat(" Stream %d Surface %d\n",
+ mStreamMap.valueAt(i).streamId(),
+ mStreamMap.valueAt(i).surfaceId());
}
} else if (!mDeferredStreams.isEmpty()) {
result.append(" Current deferred surface output stream IDs:\n");
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 2226dd2..047ccf2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -180,6 +180,34 @@
status_t getRotationTransformLocked(/*out*/int32_t* transform);
private:
+ // StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
+ // streamId identifies the stream the surface belongs to, and surfaceId is the
+ // index of the surface within that stream. (One stream can contain multiple
+ // surfaces.)
+ class StreamSurfaceId final {
+ public:
+ StreamSurfaceId() {
+ mStreamId = -1;
+ mSurfaceId = -1;
+ }
+ StreamSurfaceId(int32_t streamId, int32_t surfaceId) {
+ mStreamId = streamId;
+ mSurfaceId = surfaceId;
+ }
+ int32_t streamId() const {
+ return mStreamId;
+ }
+ int32_t surfaceId() const {
+ return mSurfaceId;
+ }
+
+ private:
+ int32_t mStreamId;
+ int32_t mSurfaceId;
+
+ }; // class StreamSurfaceId
+
+private:
/** ICameraDeviceUser interface-related private members */
/** Preview callback related members */
@@ -216,8 +244,8 @@
//check if format is not custom format
static bool isPublicFormat(int32_t format);
- // IGraphicsBufferProducer binder -> Stream ID for output streams
- KeyedVector<sp<IBinder>, int> mStreamMap;
+ // IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
+ KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
struct InputStreamConfiguration {
bool configured;
@@ -238,6 +266,9 @@
// as there are no surfaces available and can not be put into mStreamMap. Once the deferred
// Surface is configured, the stream id will be moved to mStreamMap.
Vector<int32_t> mDeferredStreams;
+
+ static const int32_t MAX_SURFACES_PER_STREAM = 2;
+ static const int32_t MAX_DEFERRED_SURFACES = 1;
};
}; // namespace android
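Since mStreamMap now maps each Surface binder to a StreamSurfaceId instead of a bare stream ID, any operation keyed by stream ID (deleteStream, prepare, tearDown) has to scan for every surface carrying that ID, as the CameraDeviceClient.cpp changes above do. A small sketch of that pattern with a made-up stream ID:

    // Collect every registered surface belonging to stream 3 (hypothetical ID).
    std::vector<sp<IBinder>> surfacesOfStream;
    for (size_t i = 0; i < mStreamMap.size(); ++i) {
        if (mStreamMap.valueAt(i).streamId() == 3) {
            surfacesOfStream.push_back(mStreamMap.keyAt(i));
        }
    }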
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 40b368e..a873402 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
#define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
+#include <list>
+
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/String16.h>
@@ -37,6 +39,9 @@
class CameraProviderManager;
+// Mapping of output stream ID to the surface IDs it serves
+typedef std::unordered_map<int, std::vector<size_t> > SurfaceMap;
+
/**
* Base interface for version >= 2 camera device classes, which interface to
* camera HAL device versions >= 2.
@@ -73,6 +78,7 @@
* Output lastFrameNumber is the expected last frame number of the list of requests.
*/
virtual status_t captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
/**
@@ -88,6 +94,7 @@
* Output lastFrameNumber is the last frame number of the previous streaming request.
*/
virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
/**
@@ -117,6 +124,19 @@
uint32_t consumerUsage = 0) = 0;
/**
+ * Create an output stream of the requested size, format, rotation and
+ * dataspace with a number of consumers.
+ *
+ * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+ * logical dimensions of the buffer, not the number of bytes.
+ */
+ virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ uint32_t consumerUsage = 0) = 0;
+
+ /**
* Create an input stream of width, height, and format.
*
* Return value is the stream ID if non-negative and an error if negative.
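The captureList/setStreamingRequestList overloads now take a std::list<const SurfaceMap> that must be index-aligned with the metadata list; Camera3Device rejects the call when the two lists differ in length. A hedged caller-side sketch of building the parallel lists, where pendingRequests is a hypothetical container pairing each metadata entry with its SurfaceMap:

    List<const CameraMetadata> requests;
    std::list<const SurfaceMap> surfaceMaps;
    for (const auto& pending : pendingRequests) {
        requests.push_back(pending.metadata);      // one metadata entry...
        surfaceMaps.push_back(pending.surfaceMap); // ...paired with one SurfaceMap
    }
    device->captureList(requests, surfaceMaps, /*lastFrameNumber*/ nullptr);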
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f64dc3..ae62e74 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -53,6 +53,7 @@
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
#include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -792,7 +793,9 @@
}
status_t Camera3Device::convertMetadataListToRequestListLocked(
- const List<const CameraMetadata> &metadataList, bool repeating,
+ const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
RequestList *requestList) {
if (requestList == NULL) {
CLOGE("requestList cannot be NULL.");
@@ -800,9 +803,11 @@
}
int32_t burstId = 0;
- for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
- it != metadataList.end(); ++it) {
- sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+ List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+ std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+ for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+ ++metadataIt, ++surfaceMapIt) {
+ sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
if (newRequest == 0) {
CLOGE("Can't create capture request");
return BAD_VALUE;
@@ -812,12 +817,12 @@
// Setup burst Id and request Id
newRequest->mResultExtras.burstId = burstId++;
- if (it->exists(ANDROID_REQUEST_ID)) {
- if (it->find(ANDROID_REQUEST_ID).count == 0) {
+ if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+ if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
CLOGE("RequestID entry exists; but must not be empty in metadata");
return BAD_VALUE;
}
- newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+ newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
} else {
CLOGE("RequestID does not exist in metadata");
return BAD_VALUE;
@@ -827,6 +832,10 @@
ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
}
+ if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+ ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+ return BAD_VALUE;
+ }
// Setup batch size if this is a high speed video recording request.
if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -846,12 +855,31 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request) {
requests.push_back(request);
- return captureList(requests, /*lastFrameNumber*/NULL);
+
+ SurfaceMap surfaceMap;
+ camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+ // With no surface list passed in, streams and surfaces have a 1-to-1
+ // mapping, so the surface index is 0 for each stream in the surfaceMap.
+ for (size_t i = 0; i < streams.count; i++) {
+ surfaceMap[streams.data.i32[i]].push_back(0);
+ }
+ surfaceMaps.push_back(surfaceMap);
}
status_t Camera3Device::submitRequestsHelper(
- const List<const CameraMetadata> &requests, bool repeating,
+ const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
/*out*/
int64_t *lastFrameNumber) {
ATRACE_CALL();
@@ -866,8 +894,8 @@
RequestList requestList;
- res = convertMetadataListToRequestListLocked(requests, repeating,
- /*out*/&requestList);
+ res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+ repeating, /*out*/&requestList);
if (res != OK) {
// error logged by previous call
return res;
@@ -1035,10 +1063,11 @@
}
status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
}
status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -1046,19 +1075,23 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
- requests.push_back(request);
- return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+ /*lastFrameNumber*/NULL);
}
status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
}
sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
status_t res;
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
@@ -1074,7 +1107,7 @@
}
}
- sp<CaptureRequest> newRequest = createCaptureRequest(request);
+ sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
return newRequest;
}
@@ -1258,8 +1291,27 @@
}
status_t Camera3Device::createStream(sp<Surface> consumer,
- uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
- camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+ uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, uint32_t consumerUsage) {
+ ATRACE_CALL();
+
+ if (consumer == nullptr) {
+ ALOGE("%s: consumer must not be null", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::vector<sp<Surface>> consumers;
+ consumers.push_back(consumer);
+
+ return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+ format, dataSpace, rotation, id, streamSetId, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, uint32_t consumerUsage) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1303,18 +1355,24 @@
streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
}
+ if (consumers.size() == 0 && !hasDeferredConsumer) {
+ ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+ return BAD_VALUE;
+ }
// HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
// which requires a consumer surface to be available.
- if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+ if (hasDeferredConsumer && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
return BAD_VALUE;
}
- if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
return BAD_VALUE;
}
+ bool streamSharing = consumers.size() > 1 || (consumers.size() > 0 && hasDeferredConsumer);
+
// Use legacy dataspace values for older HALs
if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
dataSpace = mapToLegacyDataspace(dataSpace);
@@ -1334,7 +1392,7 @@
return BAD_VALUE;
}
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1343,15 +1401,19 @@
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
- } else if (consumer == nullptr) {
+ } else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
mTimestampOffset, streamSetId);
+ } else if (streamSharing) {
+ newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+ hasDeferredConsumer, width, height, format, consumerUsage,
+ dataSpace, rotation, mTimestampOffset, streamSetId);
} else {
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
}
@@ -2029,16 +2091,18 @@
return res;
}
- if (!stream->isConfiguring()) {
- CLOGE("Stream %d was already fully configured.", streamId);
- return INVALID_OPERATION;
- }
+ if (stream->isConsumerConfigurationDeferred()) {
+ if (!stream->isConfiguring()) {
+ CLOGE("Stream %d was already fully configured.", streamId);
+ return INVALID_OPERATION;
+ }
- res = stream->finishConfiguration();
- if (res != OK) {
- SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
- stream->getId(), strerror(-res), res);
- return res;
+ res = stream->finishConfiguration();
+ if (res != OK) {
+ SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+ stream->getId(), strerror(-res), res);
+ return res;
+ }
}
return OK;
@@ -2049,7 +2113,7 @@
*/
sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
ATRACE_CALL();
status_t res;
@@ -2104,10 +2168,17 @@
mOutputStreams.editValueAt(idx);
// It is illegal to include a deferred consumer output stream into a request
- if (stream->isConsumerConfigurationDeferred()) {
- CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
- stream->getId());
- return NULL;
+ auto iter = surfaceMap.find(streams.data.i32[i]);
+ if (iter != surfaceMap.end()) {
+ const std::vector<size_t>& surfaces = iter->second;
+ for (const auto& surface : surfaces) {
+ if (stream->isConsumerConfigurationDeferred(surface)) {
+ CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+ "due to deferred consumer", stream->getId(), surface);
+ return NULL;
+ }
+ }
+ newRequest->mOutputSurfaces[i] = surfaces;
}
// Lazy completion of stream configuration (allocation/registration)
@@ -3927,6 +3998,14 @@
return TIMED_OUT;
}
halRequest->num_output_buffers++;
+
+ res = outputStream->notifyRequestedSurfaces(halRequest->frame_number,
+ captureRequest->mOutputSurfaces[i]);
+ if (res != OK) {
+ ALOGE("RequestThread: Cannot register output surfaces: %s (%d)",
+ strerror(-res), res);
+ return INVALID_OPERATION;
+ }
}
totalNumBuffers += halRequest->num_output_buffers;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 217c8b7..fe4508d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -95,10 +95,12 @@
// idle state
status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
status_t captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t setStreamingRequest(const CameraMetadata &request,
int64_t *lastFrameNumber = NULL) override;
status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
@@ -114,6 +116,12 @@
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
uint32_t consumerUsage = 0) override;
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ uint32_t consumerUsage = 0) override;
+
status_t createInputStream(
uint32_t width, uint32_t height, int format,
int *id) override;
@@ -342,6 +350,7 @@
camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
mOutputStreams;
+ SurfaceMap mOutputSurfaces;
CaptureResultExtras mResultExtras;
// Used to cancel AE precapture trigger for devices doesn't support
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
@@ -360,11 +369,18 @@
status_t convertMetadataListToRequestListLocked(
const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
/*out*/
RequestList *requestList);
- status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+ void convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request);
+
+ status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
int64_t *lastFrameNumber = NULL);
@@ -436,13 +452,15 @@
* Do common work for setting up a streaming or single capture request.
* On success, will transition to ACTIVE if in IDLE.
*/
- sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+ sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Build a CaptureRequest request from the CameraDeviceBase request
* settings.
*/
- sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+ sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Take the currently-defined set of streams and configure the HAL to use
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..7f61c7a 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -83,6 +83,14 @@
return OK;
}
+status_t Camera3DummyStream::notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids) {
+ (void) frame_number;
+ (void) surface_ids;
+ // Do nothing
+ return OK;
+}
+
status_t Camera3DummyStream::configureQueueLocked() {
// Do nothing
return OK;
@@ -103,7 +111,7 @@
return false;
}
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
return false;
}
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..37efbbb 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -56,6 +56,9 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
/**
* Return if this output stream is for video encoding.
*/
@@ -64,7 +67,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
* Set the consumer surface to the output stream.
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..1e76a27 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -124,6 +124,7 @@
int format,
android_dataspace dataSpace,
camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage, nsecs_t timestampOffset,
int setId) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
@@ -132,7 +133,8 @@
mTraceFirstBuffer(true),
mUseMonoTimestamp(false),
mUseBufferManager(false),
- mConsumerUsage(0) {
+ mTimestampOffset(timestampOffset),
+ mConsumerUsage(consumerUsage) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
mBufferReleasedListener = new BufferReleasedListener(this);
@@ -373,6 +375,24 @@
return res;
}
+ if ((res = configureConsumerQueueLocked()) != OK) {
+ return res;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need to skip these cases as the timeout will disable the non-blocking (async) mode.
+ if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+ mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
+ return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+ status_t res;
+
+ mTraceFirstBuffer = true;
+
ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
// Configure consumer-side ANativeWindow interface. The listener may be used
@@ -470,12 +490,7 @@
if (res != OK) {
ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
__FUNCTION__, mTransform, strerror(-res), res);
- }
-
- // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
- // We need skip these cases as timeout will disable the non-blocking (async) mode.
- if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
- mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ return res;
}
/**
@@ -568,14 +583,24 @@
status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
status_t res;
- int32_t u = 0;
+
if (mConsumer == nullptr) {
// mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
*usage = mConsumerUsage;
return OK;
}
- res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+ res = getEndpointUsageForSurface(usage, mConsumer);
+
+ return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const {
+ status_t res;
+ int32_t u = 0;
+
+ res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
// If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +612,8 @@
// 3. GRALLOC_USAGE_HW_COMPOSER
// 4. GRALLOC_USAGE_HW_VIDEO_ENCODER
if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
- (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
- GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+ (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+ GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
}
@@ -676,8 +701,17 @@
return OK;
}
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+status_t Camera3OutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+ const std::vector<size_t>& /*surface_ids*/) {
+ return OK;
+}
+
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
Mutex::Autolock l(mLock);
+
+ if (surface_id != 0) {
+ ALOGE("%s: surface_id for Camera3OutputStream should be 0!", __FUNCTION__);
+ }
return mConsumer == nullptr;
}
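getEndpointUsageForSurface() factors the per-surface usage query out of getEndpointUsage() so a stream that serves several surfaces can combine the usage bits of all its consumers. A sketch of that aggregation, assuming the shared stream keeps its consumers in an mSurfaces vector (as Camera3SharedOutputStream.cpp below does); this is not the literal implementation:

    uint32_t combinedUsage = 0;
    for (const auto& surface : mSurfaces) {
        uint32_t u = 0;
        if (getEndpointUsageForSurface(&u, surface) == OK) {
            combinedUsage |= u;   // OR together the endpoint usage of every consumer
        }
    }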
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..26ea63f 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -135,7 +135,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
* Set the consumer surface to the output stream.
@@ -158,6 +158,9 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
/**
* Set the graphic buffer manager to get/return the stream buffers.
*
@@ -169,6 +172,7 @@
Camera3OutputStream(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
@@ -183,12 +187,19 @@
virtual status_t disconnectLocked();
+ status_t getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const;
+ status_t configureConsumerQueueLocked();
+
+    // Consumer-end Surface that receives the camera HAL's output buffers
sp<Surface> mConsumer;
- private:
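+    // Returns the consumer usage flags passed in at construction time, for use
+    // before the consumer-side Surface(s) are available.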
+ uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
static const nsecs_t kDequeueBufferTimeout = 1000000000; // 1 sec
+ private:
+
int mTransform;
virtual status_t setTransformLocked(int transform);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..6a911c6 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,7 +43,7 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const = 0;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
/**
* Set the consumer surface to the output stream.
@@ -59,6 +59,20 @@
*
*/
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+ /**
+ * Notify which surfaces are requested for a particular frame number.
+ *
+     * Multiple surfaces can share the same output stream, but a request may
+     * target only a subset of those surfaces. In this case, the
+     * Camera3OutputStreamInterface object needs to manage the output surfaces
+     * on a per-request basis.
+ *
+ * If there is only one surface for this output stream, calling this
+ * function is a no-op.
+ */
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids) = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..b419e06
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+ const std::vector<sp<Surface>>& surfaces,
+ bool hasDeferredSurface,
+ uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation,
+ nsecs_t timestampOffset, int setId) :
+ Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+ format, dataSpace, rotation, consumerUsage,
+ timestampOffset, setId),
+ mSurfaces(surfaces),
+ mDeferred(hasDeferredSurface) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+ disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+ status_t res = OK;
+
+ mStreamSplitter = new Camera3StreamSplitter();
+
+ uint32_t usage;
+ getEndpointUsage(&usage);
+
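+    // connect() creates the splitter's internal BufferQueue and returns its
+    // producer-facing Surface in mConsumer. The camera HAL dequeues and queues
+    // buffers against that Surface, and the splitter fans the queued buffers
+    // out to the surfaces in mSurfaces.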
+ res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, mConsumer);
+ if (res != OK) {
+ ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::notifyRequestedSurfaces(uint32_t /*frame_number*/,
+ const std::vector<size_t>& surface_ids) {
+ Mutex::Autolock l(mLock);
+ status_t res = OK;
+
+ if (mStreamSplitter != nullptr) {
+ res = mStreamSplitter->notifyRequestedSurfaces(surface_ids);
+ }
+
+ return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+ Mutex::Autolock l(mLock);
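+    // Deferred surfaces always follow the non-deferred ones, so any surface_id
+    // at or beyond the currently attached surface count has not had its
+    // Surface set yet.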
+ return (mDeferred && surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumer(sp<Surface> surface) {
+ if (surface == nullptr) {
+ ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ if (!mDeferred) {
+ ALOGE("%s: Current stream isn't deferred!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mSurfaces.push_back(surface);
+
+ return mStreamSplitter->addOutput(surface, camera3_stream::max_buffers);
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+ return res;
+ }
+
+ res = connectStreamSplitterLocked();
+ if (res != OK) {
+ ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ res = configureConsumerQueueLocked();
+ if (res != OK) {
+ ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+ status_t res;
+ res = Camera3OutputStream::disconnectLocked();
+
+ if (mStreamSplitter != nullptr) {
+ mStreamSplitter->disconnect();
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+    status_t res = OK;
+ uint32_t u = 0;
+
+ if (mConsumer == nullptr) {
+ // Called before shared buffer queue is constructed.
+ *usage = getPresetConsumerUsage();
+
+ for (auto surface : mSurfaces) {
+ if (surface != nullptr) {
+ res = getEndpointUsageForSurface(&u, surface);
+ *usage |= u;
+ }
+ }
+ } else {
+ // Called after shared buffer queue is constructed.
+ res = getEndpointUsageForSurface(&u, mConsumer);
+        *usage = u;
+ }
+
+ return res;
+}
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..1b37d7c
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+ public Camera3OutputStream {
+public:
+ /**
+ * Set up a stream for formats that have 2 dimensions, with multiple
+ * surfaces. A valid stream set id needs to be set to support buffer
+ * sharing between multiple streams.
+ */
+ Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+ bool hasDeferredSurface, uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+ virtual ~Camera3SharedOutputStream();
+
+ virtual status_t notifyRequestedSurfaces(uint32_t frame_number,
+ const std::vector<size_t>& surface_ids);
+
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+ virtual status_t setConsumer(sp<Surface> consumer);
+
+private:
+ // Surfaces passed in constructor from app
+ std::vector<sp<Surface> > mSurfaces;
+
+ /**
+ * The Camera3StreamSplitter object this stream uses for stream
+ * sharing.
+ */
+ sp<Camera3StreamSplitter> mStreamSplitter;
+
+ /**
+ * Initialize stream splitter.
+ */
+ status_t connectStreamSplitterLocked();
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t disconnectLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+ bool mDeferred;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..b935141
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t hal_max_buffers,
+ sp<Surface>& consumer) {
+ if (consumer != nullptr) {
+ ALOGE("%s: output Surface is not NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+ status_t res = OK;
+
+ if (mOutputs.size() > 0 || mConsumer != nullptr) {
+ ALOGE("%s: StreamSplitter already connected", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+    // Add output surfaces. This has to happen before creating the internal
+    // buffer queue in order to determine the max consumer-side buffer count.
+ for (size_t i = 0; i < surfaces.size(); i++) {
+ if (surfaces[i] != nullptr) {
+ res = addOutputLocked(surfaces[i], hal_max_buffers,
+ OutputType::NonDeferred);
+ if (res != OK) {
+ ALOGE("%s: Failed to add output surface: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+ }
+
+ // Create buffer queue for input
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+ mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+ mMaxConsumerBuffers);
+ if (mBufferItemConsumer == nullptr) {
+ return NO_MEMORY;
+ }
+ mConsumer->setConsumerName(getUniqueConsumerName());
+
+ mSurface = new Surface(mProducer);
+ if (mSurface == nullptr) {
+ return NO_MEMORY;
+ }
+ consumer = mSurface;
+
+ res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+ return res;
+}
+
+void Camera3StreamSplitter::disconnect() {
+ Mutex::Autolock lock(mMutex);
+
+ for (auto& output : mOutputs) {
+ output->disconnect(NATIVE_WINDOW_API_CAMERA);
+ }
+ mOutputs.clear();
+
+ if (mConsumer != nullptr) {
+ mConsumer->consumerDisconnect();
+ mConsumer.clear();
+ }
+
+ if (mBuffers.size() > 0) {
+ ALOGI("%zu buffers still being tracked", mBuffers.size());
+ }
+}
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+ disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(
+ sp<Surface>& outputQueue, size_t hal_max_buffers) {
+ Mutex::Autolock lock(mMutex);
+ return addOutputLocked(outputQueue, hal_max_buffers, OutputType::Deferred);
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(
+ const sp<Surface>& outputQueue, size_t hal_max_buffers,
+ OutputType outputType) {
+ if (outputQueue == nullptr) {
+ ALOGE("addOutput: outputQueue must not be NULL");
+ return BAD_VALUE;
+ }
+ if (hal_max_buffers < 1) {
+ ALOGE("%s: Camera HAL requested max_buffer count: %zu, requires at least 1",
+ __FUNCTION__, hal_max_buffers);
+ return BAD_VALUE;
+ }
+
+ sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+ // Connect to the buffer producer
+ IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+ sp<OutputListener> listener(new OutputListener(this, gbp));
+ IInterface::asBinder(gbp)->linkToDeath(listener);
+ status_t status = gbp->connect(listener, NATIVE_WINDOW_API_CAMERA,
+ /* producerControlledByApp */ true, &queueBufferOutput);
+ if (status != NO_ERROR) {
+ ALOGE("addOutput: failed to connect (%d)", status);
+ return status;
+ }
+
+ // Query consumer side buffer count, and update overall buffer count
+ int maxConsumerBuffers = 0;
+ status = static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+ if (status != OK) {
+ ALOGE("%s: Unable to query consumer undequeued buffer count"
+ " for surface", __FUNCTION__);
+ return status;
+ }
+
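+    // The internal BufferItemConsumer is created with mMaxConsumerBuffers in
+    // connect(), so a deferred output added afterwards cannot raise that count.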
+ if (maxConsumerBuffers > mMaxConsumerBuffers) {
+ if (outputType == OutputType::Deferred) {
+ ALOGE("%s: Fatal: Deferred surface has higher consumer buffer count"
+ " %d than what's already configured %d", __FUNCTION__,
+ maxConsumerBuffers, mMaxConsumerBuffers);
+ return BAD_VALUE;
+ }
+ mMaxConsumerBuffers = maxConsumerBuffers;
+ }
+
+ ALOGV("%s: Consumer wants %d buffers, HAL wants %zu", __FUNCTION__,
+ maxConsumerBuffers, hal_max_buffers);
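+    // The output queue must be able to hold the buffers the consumer keeps
+    // undequeued plus the buffers the camera HAL may have in flight.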
+ size_t totalBufferCount = maxConsumerBuffers + hal_max_buffers;
+ status = native_window_set_buffer_count(outputQueue.get(),
+ totalBufferCount);
+ if (status != OK) {
+ ALOGE("%s: Unable to set buffer count for surface %p",
+ __FUNCTION__, outputQueue.get());
+ return status;
+ }
+
+    // Set the dequeueBuffer/attachBuffer timeout if the consumer is not a hw composer or
+    // hw texture. We need to skip those cases because the timeout would disable the
+    // non-blocking (async) mode.
+ int32_t usage = 0;
+ static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+ if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+ outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
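+    // Buffers are always attached to the outputs from the splitter's input,
+    // never allocated by the outputs themselves, so disallow allocation to
+    // catch unexpected allocations on the output side.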
+ status = gbp->allowAllocation(false);
+ if (status != OK) {
+ ALOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+ return status;
+ }
+
+ // Add new entry into mOutputs
+ mOutputs.push_back(gbp);
+ return NO_ERROR;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+ static volatile int32_t counter = 0;
+ return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyRequestedSurfaces(
+ const std::vector<size_t>& surfaces) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ mRequestedSurfaces.push_back(surfaces);
+ return OK;
+}
+
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /* item */) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ // The current policy is that if any one consumer is consuming buffers too
+ // slowly, the splitter will stall the rest of the outputs by not acquiring
+ // any more buffers from the input. This will cause back pressure on the
+ // input queue, slowing down its producer.
+
+ // If there are too many outstanding buffers, we block until a buffer is
+ // released back to the input in onBufferReleased
+ while (mOutstandingBuffers >= mMaxConsumerBuffers) {
+ mReleaseCondition.wait(mMutex);
+
+ // If the splitter is abandoned while we are waiting, the release
+ // condition variable will be broadcast, and we should just return
+ // without attempting to do anything more (since the input queue will
+ // also be abandoned).
+ if (mIsAbandoned) {
+ return;
+ }
+ }
+ // If the splitter is abandoned without reaching mMaxConsumerBuffers, just
+ // return without attempting to do anything more.
+ if (mIsAbandoned) {
+ return;
+ }
+
+ ++mOutstandingBuffers;
+
+ // Acquire and detach the buffer from the input
+ BufferItem bufferItem;
+ status_t status = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "acquiring buffer from input failed (%d)", status);
+
+ ALOGV("acquired buffer %#" PRIx64 " from input",
+ bufferItem.mGraphicBuffer->getId());
+
+ status = mConsumer->detachBuffer(bufferItem.mSlot);
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "detaching buffer from input failed (%d)", status);
+
+ IGraphicBufferProducer::QueueBufferInput queueInput(
+ bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+ bufferItem.mDataSpace, bufferItem.mCrop,
+ static_cast<int32_t>(bufferItem.mScalingMode),
+ bufferItem.mTransform, bufferItem.mFence);
+
+ // Attach and queue the buffer to each of the outputs
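+    // Requested-surface lists were pushed in request order by
+    // notifyRequestedSurfaces(); input buffers are assumed to become available
+    // in the same order, so the front entry corresponds to this buffer.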
+ std::vector<std::vector<size_t> >::iterator surfaces = mRequestedSurfaces.begin();
+ if (surfaces != mRequestedSurfaces.end()) {
+
+ LOG_ALWAYS_FATAL_IF(surfaces->size() == 0,
+ "requested surface ids shouldn't be empty");
+
+ // Initialize our reference count for this buffer
+ mBuffers[bufferItem.mGraphicBuffer->getId()] =
+ std::unique_ptr<BufferTracker>(
+ new BufferTracker(bufferItem.mGraphicBuffer, surfaces->size()));
+
+ for (auto id : *surfaces) {
+
+ LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+ "requested surface id exceeding max registered ids");
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ status = mOutputs[id]->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note
+ // that, decrement the reference count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else if (status == WOULD_BLOCK) {
+ // If the output is async, attachBuffer may return WOULD_BLOCK
+                // indicating the number of dequeued buffers has reached its limit. In
+ // this case, simply decrement the reference count, and move on
+ // to the next output.
+ // TODO: Do we need to report BUFFER_ERROR for this result?
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else if (status == TIMED_OUT) {
+ // If attachBuffer times out due to the value set by
+ // setDequeueTimeout, simply decrement the reference count, and
+ // move on to the next output.
+ // TODO: Do we need to report BUFFER_ERROR for this result?
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "attaching buffer to output failed (%d)", status);
+ }
+
+ IGraphicBufferProducer::QueueBufferOutput queueOutput;
+ status = mOutputs[id]->queueBuffer(slot, queueInput, &queueOutput);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note
+                // that, decrement the reference count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ mBuffers[bufferItem.mGraphicBuffer->getId()]->
+ decrementReferenceCountLocked();
+ continue;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "queueing buffer to output failed (%d)", status);
+ }
+
+ ALOGV("queued buffer %#" PRIx64 " to output %p",
+ bufferItem.mGraphicBuffer->getId(), mOutputs[id].get());
+ }
+
+ mRequestedSurfaces.erase(surfaces);
+ }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ sp<GraphicBuffer> buffer;
+ sp<Fence> fence;
+ status_t status = from->detachNextBuffer(&buffer, &fence);
+ if (status == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note that,
+ // but we can't do anything else, since buffer is invalid
+ onAbandonedLocked();
+ return;
+ } else {
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "detaching buffer from output failed (%d)", status);
+ }
+
+ ALOGV("detached buffer %#" PRIx64 " from output %p",
+ buffer->getId(), from.get());
+
+ BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+
+ // Merge the release fence of the incoming buffer so that the fence we send
+ // back to the input includes all of the outputs' fences
+ tracker.mergeFence(fence);
+
+ // Check to see if this is the last outstanding reference to this buffer
+ size_t referenceCount = tracker.decrementReferenceCountLocked();
+ ALOGV("buffer %#" PRIx64 " reference count %zu", buffer->getId(),
+ referenceCount);
+ if (referenceCount > 0) {
+ return;
+ }
+
+ // If we've been abandoned, we can't return the buffer to the input, so just
+ // stop tracking it and move on
+ if (mIsAbandoned) {
+ mBuffers.erase(buffer->getId());
+ return;
+ }
+
+ // Attach and release the buffer back to the input
+ int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+ status = mConsumer->attachBuffer(&consumerSlot, tracker.getBuffer());
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "attaching buffer to input failed (%d)", status);
+
+ status = mConsumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker.getMergedFence());
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "releasing buffer to input failed (%d)", status);
+
+ ALOGV("released buffer %#" PRIx64 " to input", buffer->getId());
+
+ // We no longer need to track the buffer once it has been returned to the
+ // input
+ mBuffers.erase(buffer->getId());
+
+ // Notify any waiting onFrameAvailable calls
+ --mOutstandingBuffers;
+ mReleaseCondition.signal();
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+ ALOGE("one of my outputs has abandoned me");
+ if (!mIsAbandoned && mConsumer != nullptr) {
+ mConsumer->consumerDisconnect();
+ }
+ mIsAbandoned = true;
+ mReleaseCondition.broadcast();
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+ wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output)
+ : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ sp<IGraphicBufferProducer> output = mOutput.promote();
+ if (splitter != nullptr && output != nullptr) {
+ splitter->onBufferReleasedByOutput(output);
+ }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ if (splitter != nullptr) {
+ Mutex::Autolock lock(splitter->mMutex);
+ splitter->onAbandonedLocked();
+ }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+ const sp<GraphicBuffer>& buffer, size_t referenceCount)
+ : mBuffer(buffer), mMergedFence(Fence::NO_FENCE),
+ mReferenceCount(referenceCount) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+ mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+ if (mReferenceCount > 0)
+ --mReferenceCount;
+ return mReferenceCount;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..5a25712
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
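+// Unlike a plain fan-out, each input buffer is attached and queued only to the
+// outputs requested for that capture via notifyRequestedSurfaces().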
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+ // Constructor
+ Camera3StreamSplitter() = default;
+
+ // Connect to the stream splitter by creating buffer queue and connecting it
+ // with output surfaces.
+ status_t connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t hal_max_buffers,
+ sp<Surface>& consumer);
+
+ // addOutput adds an output BufferQueue to the splitter. The splitter
+ // connects to outputQueue as a CPU producer, and any buffers queued
+ // to the input will be queued to each output. It is assumed that all of the
+ // outputs are added before any buffers are queued on the input. If any
+ // output is abandoned by its consumer, the splitter will abandon its input
+ // queue (see onAbandoned).
+ //
+ // A return value other than NO_ERROR means that an error has occurred and
+ // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+ // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+ // of other error codes.
+ status_t addOutput(sp<Surface>& outputQueue, size_t hal_max_buffers);
+
+ // Request surfaces for a particular frame number. The requested surfaces
+    // are stored in a FIFO queue. When a buffer becomes available from the
+    // input queue, the front entry of the queue decides which outputs the
+    // buffer is sent to.
+ status_t notifyRequestedSurfaces(const std::vector<size_t>& surfaces);
+
+ // Disconnect the buffer queue from output surfaces.
+ void disconnect();
+
+private:
+ // From IConsumerListener
+ //
+ // During this callback, we store some tracking information, detach the
+ // buffer from the input, and attach it to each of the outputs. This call
+ // can block if there are too many outstanding buffers. If it blocks, it
+ // will resume when onBufferReleasedByOutput releases a buffer back to the
+ // input.
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // From IConsumerListener
+ // We don't care about released buffers because we detach each buffer as
+ // soon as we acquire it. See the comment for onBufferReleased below for
+ // some clarifying notes about the name.
+ void onBuffersReleased() override {}
+
+ // From IConsumerListener
+ // We don't care about sideband streams, since we won't be splitting them
+ void onSidebandStreamChanged() override {}
+
+ // This is the implementation of the onBufferReleased callback from
+ // IProducerListener. It gets called from an OutputListener (see below), and
+    // 'from' is the producer interface from which the callback was received.
+ //
+ // During this callback, we detach the buffer from the output queue that
+ // generated the callback, update our state tracking to see if this is the
+ // last output releasing the buffer, and if so, release it to the input.
+ // If we release the buffer to the input, we allow a blocked
+ // onFrameAvailable call to proceed.
+ void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+ // When this is called, the splitter disconnects from (i.e., abandons) its
+ // input queue and signals any waiting onFrameAvailable calls to wake up.
+ // It still processes callbacks from other outputs, but only detaches their
+ // buffers so they can continue operating until they run out of buffers to
+ // acquire. This must be called with mMutex locked.
+ void onAbandonedLocked();
+
+ // This is a thin wrapper class that lets us determine which BufferQueue
+ // the IProducerListener::onBufferReleased callback is associated with. We
+ // create one of these per output BufferQueue, and then pass the producer
+ // into onBufferReleasedByOutput above.
+ class OutputListener : public BnProducerListener,
+ public IBinder::DeathRecipient {
+ public:
+ OutputListener(wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output);
+ virtual ~OutputListener() = default;
+
+ // From IProducerListener
+ void onBufferReleased() override;
+
+ // From IBinder::DeathRecipient
+ void binderDied(const wp<IBinder>& who) override;
+
+ private:
+ wp<Camera3StreamSplitter> mSplitter;
+ wp<IGraphicBufferProducer> mOutput;
+ };
+
+ class BufferTracker {
+ public:
+ BufferTracker(const sp<GraphicBuffer>& buffer, size_t referenceCount);
+ ~BufferTracker() = default;
+
+ const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+ const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+ void mergeFence(const sp<Fence>& with);
+
+ // Returns the new value
+ // Only called while mMutex is held
+ size_t decrementReferenceCountLocked();
+
+ private:
+
+ // Disallow copying
+ BufferTracker(const BufferTracker& other);
+ BufferTracker& operator=(const BufferTracker& other);
+
+ sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+ sp<Fence> mMergedFence;
+ size_t mReferenceCount;
+ };
+
+    // A deferred output is an output added to the splitter after the
+    // connect() call, whereas a non-deferred output is added within the
+    // connect() call.
+ enum class OutputType { NonDeferred, Deferred };
+
+ // Must be accessed through RefBase
+ virtual ~Camera3StreamSplitter();
+
+ status_t addOutputLocked(const sp<Surface>& outputQueue,
+ size_t hal_max_buffers, OutputType outputType);
+
+ // Get unique name for the buffer queue consumer
+ static String8 getUniqueConsumerName();
+
+    // Max consumer-side buffers for a deferred surface. This is used as a
+    // lower bound for the overall consumer-side max buffer count.
+ static const int MAX_BUFFERS_DEFERRED_OUTPUT = 2;
+ int mMaxConsumerBuffers = MAX_BUFFERS_DEFERRED_OUTPUT;
+
+ static const nsecs_t kDequeueBufferTimeout = s2ns(1); // 1 sec
+
+ // mIsAbandoned is set to true when an output dies. Once the Camera3StreamSplitter
+ // has been abandoned, it will continue to detach buffers from other
+ // outputs, but it will disconnect from the input and not attempt to
+ // communicate with it further.
+ bool mIsAbandoned = false;
+
+ Mutex mMutex;
+ Condition mReleaseCondition;
+ int mOutstandingBuffers = 0;
+
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
+ sp<Surface> mSurface;
+
+ std::vector<sp<IGraphicBufferProducer> > mOutputs;
+    // Tracks which outputs each input buffer should be attached and
+    // queued to, in request (FIFO) order.
+ std::vector<std::vector<size_t> > mRequestedSurfaces;
+
+ // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+ // objects (which are mostly for counting how many outputs have released the
+ // buffer, but also contain merged release fences).
+ std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+};
+
+} // namespace android
+
+#endif