Camera: Add DepthCompositeStream
Add the logic needed to compose Jpeg and depth map buffers into
dynamic depth output streams.
Bug: 109735087
Test: Manual using application,
Camera CTS
Change-Id: Ic4710872dc596bc718270e1c79d4da53fb850875
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 46fbc3e..9e203da 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -33,6 +33,8 @@
#include <camera_metadata_hidden.h>
+#include "DepthCompositeStream.h"
+
// Convenience methods for constructing binder::Status objects for error returns
#define STATUS_ERROR(errorCode, errorString) \
@@ -143,6 +145,7 @@
binder::Status CameraDeviceClient::insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
SurfaceMap* outSurfaceMap, Vector<int32_t>* outputStreamIds, int32_t *currentStreamId) {
+ ssize_t compositeIdx;
int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
// Trying to submit request with surface that wasn't created
@@ -152,6 +155,11 @@
__FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request targets Surface that is not part of current capture session");
+ } else if ((compositeIdx = mCompositeStreamMap.indexOfKey(IInterface::asBinder(gbp)))
+ != NAME_NOT_FOUND) {
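+ // The surface belongs to a composite stream. Let the composite instance map
+ // the request on to its internal camera streams.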
+ mCompositeStreamMap.valueAt(compositeIdx)->insertGbp(outSurfaceMap, outputStreamIds,
+ currentStreamId);
+ return binder::Status::ok();
}
const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
@@ -489,6 +497,17 @@
mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ } else {
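+ // Configuration succeeded. Give each composite stream the chance to connect
+ // to its consumers and start processing.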
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ err = mCompositeStreamMap.valueAt(i)->configureStream();
+ if (err != OK) {
+ String8 msg = String8::format("Camera %s: Error configuring composite "
+ "streams: %s (%d)", mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ break;
+ }
+ }
}
return res;
@@ -692,8 +711,35 @@
return res;
if (!isStreamInfoValid) {
- mapStreamInfo(streamInfo, static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ if (camera3::DepthCompositeStream::isDepthCompositeStream(surface)) {
+ // We need to take into account that composite streams can have
+ // additional internal camera streams.
+ std::vector<OutputStreamInfo> compositeStreams;
+ ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+ mDevice->info(), &compositeStreams);
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed adding depth composite streams: %s (%d)",
+ mCameraIdStr.string(), strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (compositeStreams.size() > 1) {
+ streamCount += compositeStreams.size() - 1;
+ streamConfiguration.streams.resize(streamCount);
+ }
+
+ for (const auto& compositeStream : compositeStreams) {
+ mapStreamInfo(compositeStream,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ } else {
+ mapStreamInfo(streamInfo,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
isStreamInfoValid = true;
}
}
@@ -743,6 +789,7 @@
bool isInput = false;
std::vector<sp<IBinder>> surfaces;
ssize_t dIndex = NAME_NOT_FOUND;
+ ssize_t compositeIndex = NAME_NOT_FOUND;
if (mInputStream.configured && mInputStream.id == streamId) {
isInput = true;
@@ -762,6 +809,13 @@
}
}
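+ // The stream id may also refer to a composite stream.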
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ if (streamId == mCompositeStreamMap.valueAt(i)->getStreamId()) {
+ compositeIndex = i;
+ break;
+ }
+ }
+
if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
" stream created yet", mCameraIdStr.string(), streamId);
@@ -791,6 +845,19 @@
if (dIndex != NAME_NOT_FOUND) {
mDeferredStreams.removeItemsAt(dIndex);
}
+
+ if (compositeIndex != NAME_NOT_FOUND) {
+ status_t ret;
+ if ((ret = mCompositeStreamMap.valueAt(compositeIndex)->deleteStream())
+ != OK) {
+ String8 msg = String8::format("Camera %s: Unexpected error %s (%d) when "
+ "deleting composite stream %d", mCameraIdStr.string(), strerror(-err), err,
+ streamId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ mCompositeStreamMap.removeItemsAt(compositeIndex);
+ }
}
}
@@ -870,11 +937,25 @@
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<int> surfaceIds;
- err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
- streamInfo.height, streamInfo.format, streamInfo.dataSpace,
- static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared);
+ if (!camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0])) {
+ err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format, streamInfo.dataSpace,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ } else {
+ sp<CompositeStream> compositeStream = new camera3::DepthCompositeStream(mDevice,
+ getRemoteCallback());
+ err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ if (err == OK) {
+ mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
+ compositeStream);
+ }
+ }
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -1808,7 +1889,14 @@
// Thread safe. Don't bother locking.
sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
- if (remoteCb != 0) {
+ // Composites can have multiple internal streams. Error notifications coming from such internal
+ // streams may need to remain within camera service.
+ bool skipClientNotification = false;
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ skipClientNotification |= mCompositeStreamMap.valueAt(i)->onError(errorCode, resultExtras);
+ }
+
+ if ((remoteCb != 0) && (!skipClientNotification)) {
remoteCb->onDeviceError(errorCode, resultExtras);
}
}
@@ -1901,6 +1989,10 @@
remoteCb->onResultReceived(result.mMetadata, result.mResultExtras,
result.mPhysicalMetadatas);
}
+
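+ // Composite streams can use the capture result data (e.g. the jpeg quality)
+ // during their internal processing.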
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ mCompositeStreamMap.valueAt(i)->onResultAvailable(result);
+ }
}
binder::Status CameraDeviceClient::checkPidStatus(const char* checkLocation) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 17a0983..1c5abb0 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -26,8 +26,10 @@
#include "CameraService.h"
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
+#include "CompositeStream.h"
using android::camera3::OutputStreamInfo;
+using android::camera3::CompositeStream;
namespace android {
@@ -314,6 +316,8 @@
// stream ID -> outputStreamInfo mapping
std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
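+
+ // Output surface producer binder -> composite stream mapping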
+ KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
+
static const int32_t MAX_SURFACES_PER_STREAM = 4;
sp<CameraProviderManager> mProviderManager;
};
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
new file mode 100644
index 0000000..796bf42
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-CompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraDeviceBase.h"
+#include "CameraDeviceClient.h"
+#include "CompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+CompositeStream::CompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ mDevice(device),
+ mRemoteCallback(cb),
+ mNumPartialResults(1),
+ mErrorState(false) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (entry.count > 0) {
+ mNumPartialResults = entry.data.i32[0];
+ }
+ }
+}
+
+status_t CompositeStream::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
+ std::vector<int> * surfaceIds, int streamSetId, bool isShared) {
+ if (hasDeferredConsumer) {
+ ALOGE("%s: Deferred consumers not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (streamSetId != camera3::CAMERA3_STREAM_SET_ID_INVALID) {
+ ALOGE("%s: Surface groups not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (isShared) {
+ ALOGE("%s: Shared surfaces not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation, id,
+ physicalCameraId, surfaceIds, streamSetId, isShared);
+}
+
+status_t CompositeStream::deleteStream() {
+ {
+ Mutex::Autolock l(mMutex);
+ mPendingCaptureResults.clear();
+ mCaptureResults.clear();
+ mFrameNumberMap.clear();
+ mErrorFrameNumbers.clear();
+ }
+
+ return deleteInternalStreams();
+}
+
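+// A buffer request for the composite stream indicates that the respective frame
+// will need processing; start tracking its capture result.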
+void CompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && (streamId == getStreamId())) {
+ mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
+ }
+}
+
+void CompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && !bufferInfo.mError) {
+ mFrameNumberMap.emplace(bufferInfo.mFrameNumber, bufferInfo.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+}
+
+void CompositeStream::eraseResult(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it == mPendingCaptureResults.end()) {
+ return;
+ }
+
+ mPendingCaptureResults.erase(it);
+}
+
+void CompositeStream::onResultAvailable(const CaptureResult& result) {
+ bool resultError = false;
+ {
+ Mutex::Autolock l(mMutex);
+
+ uint64_t frameNumber = result.mResultExtras.frameNumber;
+ bool resultReady = false;
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it != mPendingCaptureResults.end()) {
+ it->second.append(result.mMetadata);
+ if (result.mResultExtras.partialResultCount >= mNumPartialResults) {
+ auto entry = it->second.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 1) {
+ auto ts = entry.data.i64[0];
+ mCaptureResults.emplace(ts, std::make_tuple(frameNumber, it->second));
+ resultReady = true;
+ } else {
+ ALOGE("%s: Timestamp metadata entry missing for frameNumber: %" PRIu64,
+ __FUNCTION__, frameNumber);
+ resultError = true;
+ }
+ mPendingCaptureResults.erase(it);
+ }
+ }
+
+ if (resultReady) {
+ mInputReadyCondition.signal();
+ }
+ }
+
+ if (resultError) {
+ onResultError(result.mResultExtras);
+ }
+}
+
+void CompositeStream::flagAnErrorFrameNumber(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+ mErrorFrameNumbers.emplace(frameNumber);
+ mInputReadyCondition.signal();
+}
+
+status_t CompositeStream::registerCompositeStreamListener(int32_t streamId) {
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device.get() == nullptr) {
+ return NO_INIT;
+ }
+
+ auto ret = device->addBufferListenerForStream(streamId, this);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register composite stream listener!", __FUNCTION__);
+ }
+
+ return ret;
+}
+
+bool CompositeStream::onError(int32_t errorCode, const CaptureResultExtras& resultExtras) {
+ auto ret = false;
+ switch (errorCode) {
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ onResultError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ ret = onStreamBufferError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ // Invalid request, this shouldn't affect composite streams.
+ break;
+ default:
+ ALOGE("%s: Unrecoverable error: %d detected!", __FUNCTION__, errorCode);
+ Mutex::Autolock l(mMutex);
+ mErrorState = true;
+ break;
+ }
+
+ return ret;
+}
+
+void CompositeStream::notifyError(int64_t frameNumber) {
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb =
+ mRemoteCallback.promote();
+
+ if ((frameNumber >= 0) && (remoteCb.get() != nullptr)) {
+ CaptureResultExtras extras;
+ extras.errorStreamId = getStreamId();
+ extras.frameNumber = frameNumber;
+ remoteCb->onDeviceError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
+ extras);
+ }
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
new file mode 100644
index 0000000..5837745
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+
+#include <set>
+#include <unordered_map>
+
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <camera/CameraMetadata.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include "common/CameraDeviceBase.h"
+#include "device3/Camera3StreamInterface.h"
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class CompositeStream : public camera3::Camera3StreamBufferListener {
+
+public:
+ CompositeStream(wp<CameraDeviceBase> device, wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ virtual ~CompositeStream() {}
+
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared);
+
+ status_t deleteStream();
+
+ // Create and register all internal camera streams.
+ virtual status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) = 0;
+
+ // Release all internal streams and corresponding resources.
+ virtual status_t deleteInternalStreams() = 0;
+
+ // Stream configuration completed.
+ virtual status_t configureStream() = 0;
+
+ // Insert the internal composite stream ids into the user capture request.
+ virtual status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t>* /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) = 0;
+
+ // Return composite stream id.
+ virtual int getStreamId() = 0;
+
+ void onResultAvailable(const CaptureResult& result);
+ bool onError(int32_t errorCode, const CaptureResultExtras& resultExtras);
+
+ // Camera3StreamBufferListener implementation
+ void onBufferAcquired(const BufferInfo& /*bufferInfo*/) override { /*Empty for now */ }
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+ void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) override;
+
+protected:
+ status_t registerCompositeStreamListener(int32_t streamId);
+ void eraseResult(int64_t frameNumber);
+ void flagAnErrorFrameNumber(int64_t frameNumber);
+ void notifyError(int64_t frameNumber);
+
+ // Subclasses should check for buffer errors from internal streams and return 'true' if
+ // the error notification should remain within the camera service.
+ virtual bool onStreamBufferError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Subclasses can decide how to handle result errors depending on whether or not the
+ // internal processing needs result data.
+ virtual void onResultError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Device and/or service is in unrecoverable error state.
+ // Composite streams should behave accordingly.
+ void enableErrorState();
+
+ wp<CameraDeviceBase> mDevice;
+ wp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
+
+ mutable Mutex mMutex;
+ Condition mInputReadyCondition;
+ int32_t mNumPartialResults;
+ bool mErrorState;
+
+ // Frame number to capture result map of partially completed pending requests.
+ std::unordered_map<uint64_t, CameraMetadata> mPendingCaptureResults;
+
+ // Timestamp to (frame number, capture result) map of completed pending requests.
+ std::unordered_map<int64_t, std::tuple<int64_t, CameraMetadata>> mCaptureResults;
+
+ // Frame number to timestamp map
+ std::unordered_map<int64_t, int64_t> mFrameNumberMap;
+
+ // Keeps a set of buffer/result frame numbers for any errors detected during processing.
+ std::set<int64_t> mErrorFrameNumbers;
+
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
new file mode 100644
index 0000000..b12bb50
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DepthCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "api1/client2/JpegProcessor.h"
+#include "common/CameraProviderManager.h"
+
+#include <dynamic_depth/camera.h>
+#include <dynamic_depth/cameras.h>
+#include <dynamic_depth/container.h>
+#include <dynamic_depth/device.h>
+#include <dynamic_depth/dimension.h>
+#include <dynamic_depth/dynamic_depth.h>
+#include <dynamic_depth/point.h>
+#include <dynamic_depth/pose.h>
+#include <dynamic_depth/profile.h>
+#include <dynamic_depth/profiles.h>
+#include <xmpmeta/xmp_data.h>
+#include <xmpmeta/xmp_writer.h>
+
+#include <jpeglib.h>
+#include <math.h>
+
+#include <gui/Surface.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "DepthCompositeStream.h"
+
+using dynamic_depth::Camera;
+using dynamic_depth::Cameras;
+using dynamic_depth::CameraParams;
+using dynamic_depth::Container;
+using dynamic_depth::DepthFormat;
+using dynamic_depth::DepthMapParams;
+using dynamic_depth::DepthUnits;
+using dynamic_depth::Device;
+using dynamic_depth::DeviceParams;
+using dynamic_depth::Dimension;
+using dynamic_depth::Image;
+using dynamic_depth::ImagingModelParams;
+using dynamic_depth::Pose;
+using dynamic_depth::Profile;
+using dynamic_depth::Profiles;
+
+namespace android {
+namespace camera3 {
+
+DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ CompositeStream(device, cb),
+ mBlobStreamId(-1),
+ mBlobSurfaceId(-1),
+ mDepthStreamId(-1),
+ mDepthSurfaceId(-1),
+ mBlobWidth(0),
+ mBlobHeight(0),
+ mDepthBufferAcquired(false),
+ mBlobBufferAcquired(false),
+ mProducerListener(new ProducerListener()),
+ mMaxJpegSize(-1),
+ mIsLogicalCamera(false) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+ if (entry.count > 0) {
+ mMaxJpegSize = entry.data.i32[0];
+ } else {
+ ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
+ if (entry.count == 5) {
+ mIntrinsicCalibration.reserve(5);
+ mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
+ entry.data.f + 5);
+ } else {
+ ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_DISTORTION);
+ if (entry.count == 5) {
+ mLensDistortion.reserve(5);
+ mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
+ } else {
+ ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ for (size_t i = 0; i < entry.count; ++i) {
+ uint8_t capability = entry.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ mIsLogicalCamera = true;
+ break;
+ }
+ }
+
+ getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
+ }
+}
+
+DepthCompositeStream::~DepthCompositeStream() {
+ mBlobConsumer.clear();
+ mBlobSurface.clear();
+ mBlobStreamId = -1;
+ mBlobSurfaceId = -1;
+ mDepthConsumer.clear();
+ mDepthSurface.clear();
+}
+
+void DepthCompositeStream::compilePendingInputLocked() {
+ CpuConsumer::LockedBuffer imgBuffer;
+
+ while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
+ auto it = mInputJpegBuffers.begin();
+ auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Can not lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputJpegBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mBlobConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
+ mBlobBufferAcquired = true;
+ }
+ mInputJpegBuffers.erase(it);
+ }
+
+ while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
+ auto it = mInputDepthBuffers.begin();
+ auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Can not lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputDepthBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mDepthConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
+ mDepthBufferAcquired = true;
+ }
+ mInputDepthBuffers.erase(it);
+ }
+
+ while (!mCaptureResults.empty()) {
+ auto it = mCaptureResults.begin();
+ // Negative timestamp indicates that something went wrong during the capture result
+ // collection process.
+ if (it->first >= 0) {
+ mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
+ mPendingInputFrames[it->first].result = std::get<1>(it->second);
+ }
+ mCaptureResults.erase(it);
+ }
+
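+ // Distribute the frame numbers from released buffers to their matching pending
+ // input frames (keyed by sensor timestamp).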
+ while (!mFrameNumberMap.empty()) {
+ auto it = mFrameNumberMap.begin();
+ mPendingInputFrames[it->second].frameNumber = it->first;
+ mFrameNumberMap.erase(it);
+ }
+
+ auto it = mErrorFrameNumbers.begin();
+ while (it != mErrorFrameNumbers.end()) {
+ bool frameFound = false;
+ for (auto &inputFrame : mPendingInputFrames) {
+ if (inputFrame.second.frameNumber == *it) {
+ inputFrame.second.error = true;
+ frameFound = true;
+ break;
+ }
+ }
+
+ if (frameFound) {
+ it = mErrorFrameNumbers.erase(it);
+ } else {
+ ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+ *it);
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
+ if (currentTs == nullptr) {
+ return false;
+ }
+
+ bool newInputAvailable = false;
+ for (const auto& it : mPendingInputFrames) {
+ if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
+ (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ newInputAvailable = true;
+ }
+ }
+
+ return newInputAvailable;
+}
+
+int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
+ int64_t ret = -1;
+ if (currentTs == nullptr) {
+ return ret;
+ }
+
+ for (const auto& it : mPendingInputFrames) {
+ if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ ret = it.second.frameNumber;
+ }
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in,
+ void *out, const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize) {
+ status_t ret;
+ // libjpeg is a C library so we use C-style "inheritance" by
+ // putting libjpeg's jpeg_destination_mgr first in our custom
+ // struct. This allows us to cast jpeg_destination_mgr* to
+ // CustomJpegDestMgr* when we get it passed to us in a callback.
+ struct CustomJpegDestMgr : public jpeg_destination_mgr {
+ JOCTET *mBuffer;
+ size_t mBufferSize;
+ size_t mEncodedSize;
+ bool mSuccess;
+ } dmgr;
+
+ jpeg_compress_struct cinfo = {};
+ jpeg_error_mgr jerr;
+
+ // Initialize error handling with standard callbacks, but
+ // then override output_message (to print to ALOG) and
+ // error_exit to set a flag and print a message instead
+ // of killing the whole process.
+ cinfo.err = jpeg_std_error(&jerr);
+
+ cinfo.err->output_message = [](j_common_ptr cinfo) {
+ char buffer[JMSG_LENGTH_MAX];
+
+ /* Create the message */
+ (*cinfo->err->format_message)(cinfo, buffer);
+ ALOGE("libjpeg error: %s", buffer);
+ };
+
+ cinfo.err->error_exit = [](j_common_ptr cinfo) {
+ (*cinfo->err->output_message)(cinfo);
+ if (cinfo->client_data) {
+ auto & dmgr = *static_cast<CustomJpegDestMgr*>(cinfo->client_data);
+ dmgr.mSuccess = false;
+ }
+ };
+
+ // Now that we initialized some callbacks, let's create our compressor
+ jpeg_create_compress(&cinfo);
+ dmgr.mBuffer = static_cast<JOCTET*>(out);
+ dmgr.mBufferSize = maxOutSize;
+ dmgr.mEncodedSize = 0;
+ dmgr.mSuccess = true;
+ cinfo.client_data = static_cast<void*>(&dmgr);
+
+ // These lambdas become C-style function pointers and as per C++11 spec
+ // may not capture anything.
+ dmgr.init_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.next_output_byte = dmgr.mBuffer;
+ dmgr.free_in_buffer = dmgr.mBufferSize;
+ ALOGV("%s:%d jpeg start: %p [%zu]",
+ __FUNCTION__, __LINE__, dmgr.mBuffer, dmgr.mBufferSize);
+ };
+
+ dmgr.empty_output_buffer = [](j_compress_ptr cinfo __unused) {
+ ALOGV("%s:%d Out of buffer", __FUNCTION__, __LINE__);
+ return 0;
+ };
+
+ dmgr.term_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.free_in_buffer;
+ ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
+ };
+ cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
+ cinfo.image_width = width;
+ cinfo.image_height = height;
+ cinfo.input_components = 1;
+ cinfo.in_color_space = JCS_GRAYSCALE;
+
+ // Initialize defaults and then override what we want
+ jpeg_set_defaults(&cinfo);
+
+ jpeg_set_quality(&cinfo, jpegQuality, 1);
+ jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
+ cinfo.raw_data_in = 0;
+ cinfo.dct_method = JDCT_IFAST;
+
+ cinfo.comp_info[0].h_samp_factor = 1;
+ cinfo.comp_info[1].h_samp_factor = 1;
+ cinfo.comp_info[2].h_samp_factor = 1;
+ cinfo.comp_info[0].v_samp_factor = 1;
+ cinfo.comp_info[1].v_samp_factor = 1;
+ cinfo.comp_info[2].v_samp_factor = 1;
+
+ jpeg_start_compress(&cinfo, TRUE);
+
+ for (size_t i = 0; i < cinfo.image_height; i++) {
+ auto currentRow = static_cast<JSAMPROW>(in + i*width);
+ jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
+ }
+
+ jpeg_finish_compress(&cinfo);
+
+ actualSize = dmgr.mEncodedSize;
+ if (dmgr.mSuccess) {
+ ret = NO_ERROR;
+ } else {
+ ret = UNKNOWN_ERROR;
+ }
+
+ return ret;
+}
+
+std::unique_ptr<DepthMap> DepthCompositeStream::processDepthMapFrame(
+ const CpuConsumer::LockedBuffer &depthMapBuffer, size_t maxJpegSize, uint8_t jpegQuality,
+ std::vector<std::unique_ptr<Item>> *items /*out*/) {
+ std::vector<float> points, confidence;
+
+ size_t pointCount = depthMapBuffer.width * depthMapBuffer.height;
+ points.reserve(pointCount);
+ confidence.reserve(pointCount);
+ float near = UINT16_MAX;
+ float far = .0f;
+ uint16_t *data = reinterpret_cast<uint16_t *> (depthMapBuffer.data);
+ for (size_t i = 0; i < depthMapBuffer.height; i++) {
+ for (size_t j = 0; j < depthMapBuffer.width; j++) {
+ // Android densely packed depth map. The units for the range are in
+ // millimeters and need to be scaled to meters.
+ // The confidence value is encoded in the 3 most significant bits.
+ // The confidence data needs to be additionally normalized with
+ // values 1.0f, 0.0f representing maximum and minimum confidence
+ // respectively.
+ auto value = data[i*depthMapBuffer.stride + j];
+ auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+ points.push_back(point);
+
+ auto conf = (value >> 13) & 0x7;
+ float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+ confidence.push_back(normConfidence);
+
+ if (near > point) {
+ near = point;
+ }
+ if (far < point) {
+ far = point;
+ }
+ }
+ }
+
+ if (near == far) {
+ ALOGE("%s: Near and far range values must not match!", __FUNCTION__);
+ return nullptr;
+ }
+
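+ // Quantize the range inverse depth encoding to 8 bits:
+ // q = 255 * far * (d - near) / (d * (far - near))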
+ std::vector<uint8_t> pointsQuantized, confidenceQuantized;
+ pointsQuantized.reserve(pointCount); confidenceQuantized.reserve(pointCount);
+ auto pointIt = points.begin();
+ auto confidenceIt = confidence.begin();
+ while ((pointIt != points.end()) && (confidenceIt != confidence.end())) {
+ pointsQuantized.push_back(floorf(((far * (*pointIt - near)) /
+ (*pointIt * (far - near))) * 255.0f));
+ confidenceQuantized.push_back(floorf(*confidenceIt * 255.0f));
+ confidenceIt++; pointIt++;
+ }
+
+ DepthMapParams depthParams(DepthFormat::kRangeInverse, near, far, DepthUnits::kMeters,
+ "android/depthmap");
+ depthParams.confidence_uri = "android/confidencemap";
+ depthParams.mime = "image/jpeg";
+ depthParams.depth_image_data.resize(maxJpegSize);
+ depthParams.confidence_data.resize(maxJpegSize);
+ size_t actualJpegSize;
+ auto ret = encodeGrayscaleJpeg(depthMapBuffer.width, depthMapBuffer.height,
+ pointsQuantized.data(), depthParams.depth_image_data.data(), maxJpegSize, jpegQuality,
+ actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Depth map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.depth_image_data.resize(actualJpegSize);
+
+ ret = encodeGrayscaleJpeg(depthMapBuffer.width, depthMapBuffer.height,
+ confidenceQuantized.data(), depthParams.confidence_data.data(), maxJpegSize,
+ jpegQuality, actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.confidence_data.resize(actualJpegSize);
+
+ return DepthMap::FromData(depthParams, items);
+}
+
+status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
+ status_t res;
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ ANativeWindowBuffer *anb;
+ int fenceFd;
+ void *dstBuffer;
+ auto imgBuffer = inputFrame.jpegBuffer;
+
+ auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
+ inputFrame.jpegBuffer.width);
+ if (jpegSize == 0) {
+ ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
+ jpegSize = inputFrame.jpegBuffer.width;
+ }
+
+ std::vector<std::unique_ptr<Item>> items;
+ std::vector<std::unique_ptr<Camera>> cameraList;
+ auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+ std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
+ if (cameraParams == nullptr) {
+ ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ size_t maxDepthJpegSize;
+ if (mMaxJpegSize > 0) {
+ maxDepthJpegSize = mMaxJpegSize;
+ } else {
+ maxDepthJpegSize = std::max<size_t> (jpegSize,
+ inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
+ }
+ uint8_t jpegQuality = 100;
+ auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
+ if (entry.count > 0) {
+ jpegQuality = entry.data.u8[0];
+ }
+ cameraParams->depth_map = processDepthMapFrame(inputFrame.depthBuffer, maxDepthJpegSize,
+ jpegQuality, &items);
+ if (cameraParams->depth_map == nullptr) {
+ ALOGE("%s: Depth map processing failed!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ cameraParams->imaging_model = getImagingModel();
+
+ if (mIsLogicalCamera) {
+ cameraParams->trait = dynamic_depth::CameraTrait::LOGICAL;
+ } else {
+ cameraParams->trait = dynamic_depth::CameraTrait::PHYSICAL;
+ }
+
+ cameraList.emplace_back(Camera::FromData(std::move(cameraParams)));
+
+ auto deviceParams = std::make_unique<DeviceParams> (Cameras::FromCameraArray(&cameraList));
+ deviceParams->container = Container::FromItems(&items);
+ std::vector<std::unique_ptr<Profile>> profileList;
+ profileList.emplace_back(Profile::FromData("DepthPhoto", {0}));
+ deviceParams->profiles = Profiles::FromProfileArray(&profileList);
+ std::unique_ptr<Device> device = Device::FromData(std::move(deviceParams));
+ if (device == nullptr) {
+ ALOGE("%s: Failed to initialize camera device", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::istringstream inputJpegStream(std::string(reinterpret_cast<const char *> (imgBuffer.data),
+ jpegSize));
+ std::ostringstream outputJpegStream;
+ if (!WriteImageAndMetadataAndContainer(&inputJpegStream, device.get(), &outputJpegStream)) {
+ ALOGE("%s: Failed writing depth output", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ size_t finalJpegSize = static_cast<size_t> (outputJpegStream.tellp()) +
+ sizeof(struct camera3_jpeg_blob);
+
+ ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
+ if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegSize, 1))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer dimensions"
+ " %zux%u for stream %d", __FUNCTION__, finalJpegSize, 1U, mBlobStreamId);
+ return res;
+ }
+
+ res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+ res);
+ return res;
+ }
+
+ sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
+ res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return res;
+ }
+
+ if ((gb->getWidth() < finalJpegSize) || (gb->getHeight() != 1)) {
+ ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
+ gb->getWidth(), gb->getHeight(), finalJpegSize, 1U);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return BAD_VALUE;
+ }
+
+ // Copy final jpeg with embedded depth data in the composite stream output buffer
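+ // The camera3_jpeg_blob transport header must be placed at the very end of the
+ // buffer so that consumers can locate the actual jpeg size.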
+ uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
+ (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
+ struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
+ blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+ blob->jpeg_size = static_cast<uint32_t> (outputJpegStream.tellp());
+ memcpy(dstBuffer, outputJpegStream.str().c_str(), blob->jpeg_size);
+ outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+
+ return res;
+}
+
+void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
+ if (inputFrame == nullptr) {
+ return;
+ }
+
+ if (inputFrame->depthBuffer.data != nullptr) {
+ mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
+ inputFrame->depthBuffer.data = nullptr;
+ mDepthBufferAcquired = false;
+ }
+
+ if (inputFrame->jpegBuffer.data != nullptr) {
+ mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
+ inputFrame->jpegBuffer.data = nullptr;
+ mBlobBufferAcquired = false;
+ }
+
+ if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
+ notifyError(inputFrame->frameNumber);
+ inputFrame->errorNotified = true;
+ }
+}
+
+void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+ auto it = mPendingInputFrames.begin();
+ while (it != mPendingInputFrames.end()) {
+ if (it->first <= currentTs) {
+ releaseInputFrameLocked(&it->second);
+ it = mPendingInputFrames.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::threadLoop() {
+ int64_t currentTs = INT64_MAX;
+ bool newInputAvailable = false;
+
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (mErrorState) {
+ // If we land in an error state, return any pending buffers and halt all
+ // further processing.
+ compilePendingInputLocked();
+ releaseInputFramesLocked(currentTs);
+ return false;
+ }
+
+ while (!newInputAvailable) {
+ compilePendingInputLocked();
+ newInputAvailable = getNextReadyInputLocked(&currentTs);
+ if (!newInputAvailable) {
+ auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
+ if (failingFrameNumber >= 0) {
+ // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+ // possible for two internal stream buffers to fail. In such a scenario the
+ // composite stream should notify the client about a stream buffer error only
+ // once and this information is kept within 'errorNotified'.
+ // Any present failed input frames will be removed on a subsequent call to
+ // 'releaseInputFramesLocked()'.
+ releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+ currentTs = INT64_MAX;
+ }
+
+ auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+ if (ret == TIMED_OUT) {
+ return true;
+ } else if (ret != OK) {
+ ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ return false;
+ }
+ }
+ }
+ }
+
+ auto res = processInputFrame(mPendingInputFrames[currentTs]);
+ Mutex::Autolock l(mMutex);
+ if (res != OK) {
+ ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
+ currentTs, strerror(-res), res);
+ mPendingInputFrames[currentTs].error = true;
+ }
+
+ releaseInputFramesLocked(currentTs);
+
+ return true;
+}
+
+bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
+ ANativeWindow *anw = surface.get();
+ status_t err;
+ int format;
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ int dataspace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+ String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
+ return true;
+ }
+
+ return false;
+}
+
+status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ if (mSupportedDepthSizes.empty()) {
+ ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
+ return ret;
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
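+ // The internal blob stream is consumed by a service-owned CpuConsumer instead of
+ // the client surface. The final dynamic depth jpeg is queued to the client
+ // surface ('mOutputSurface') once processing completes.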
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
+ mBlobConsumer->setFrameAvailableListener(this);
+ mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
+ mBlobSurface = new Surface(producer);
+
+ ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
+ id, physicalCameraId, surfaceIds);
+ if (ret == OK) {
+ mBlobStreamId = *id;
+ mBlobSurfaceId = (*surfaceIds)[0];
+ mOutputSurface = consumers[0];
+ } else {
+ return ret;
+ }
+
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
+ mDepthConsumer->setFrameAvailableListener(this);
+ mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
+ mDepthSurface = new Surface(producer);
+ std::vector<int> depthSurfaceId;
+ ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
+ kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
+ if (ret == OK) {
+ mDepthSurfaceId = depthSurfaceId[0];
+ } else {
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(getStreamId());
+ if (ret != OK) {
+ ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(mDepthStreamId);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ mBlobWidth = width;
+ mBlobHeight = height;
+
+ return ret;
+}
+
+status_t DepthCompositeStream::configureStream() {
+ if (isRunning()) {
+ // Processing thread is already running, nothing more to do.
+ return NO_ERROR;
+ }
+
+ if (mOutputSurface.get() == nullptr) {
+ ALOGE("%s: No valid output surface set!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+ mBlobStreamId);
+ return res;
+ }
+
+ int maxProducerBuffers;
+ ANativeWindow *anw = mBlobSurface.get();
+ if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ ANativeWindow *anwConsumer = mOutputSurface.get();
+ int maxConsumerBuffers;
+ if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
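+ // Reserve enough buffers on the output surface for both the producer and
+ // consumer sides to avoid blocking either end.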
+ if ((res = native_window_set_buffer_count(
+ anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ run("DepthCompositeStreamProc");
+
+ return NO_ERROR;
+}
+
+status_t DepthCompositeStream::deleteInternalStreams() {
+ // The 'CameraDeviceClient' parent will delete the blob stream
+ requestExit();
+
+ auto ret = join();
+ if (ret != OK) {
+ ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ if (mDepthStreamId >= 0) {
+ ret = device->deleteStream(mDepthStreamId);
+ mDepthStreamId = -1;
+ }
+
+ return ret;
+}
+
+void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
+ if (item.mDataSpace == kJpegDataSpace) {
+ ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputJpegBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else if (item.mDataSpace == kDepthMapDataSpace) {
+ ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
+ ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputDepthBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else {
+ ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+ }
+}
+
+status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+ if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mDepthStreamId);
+ }
+ (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
+
+ if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mBlobStreamId);
+ }
+ (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = mBlobStreamId;
+ }
+
+ return NO_ERROR;
+}
+
+void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+ // Processing can continue even in case of result errors.
+ // At the moment depth composite stream processing relies mainly on static camera
+ // characteristics data. The actual result data can be used for the jpeg quality, but
+ // if it is absent we default to maximum quality.
+ eraseResult(resultExtras.frameNumber);
+}
+
+bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+ bool ret = false;
+ // Buffer errors concerning internal composite streams should not be directly visible to
+ // camera clients. They must only receive a single buffer error with the public composite
+ // stream id.
+ if ((resultExtras.errorStreamId == mDepthStreamId) ||
+ (resultExtras.errorStreamId == mBlobStreamId)) {
+ flagAnErrorFrameNumber(resultExtras.frameNumber);
+ ret = true;
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
+ if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ float arTol = CameraProviderManager::kDepthARTolerance;
+ *depthWidth = *depthHeight = 0;
+
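+ // Prefer an exact size match. Otherwise, pick the largest depth size whose
+ // aspect ratio is within 'kDepthARTolerance' of the blob aspect ratio.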
+ float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
+ for (const auto& it : supportedDepthSizes) {
+ auto currentWidth = std::get<0>(it);
+ auto currentHeight = std::get<1>(it);
+ if ((currentWidth == width) && (currentHeight == height)) {
+ *depthWidth = width;
+ *depthHeight = height;
+ break;
+ } else {
+ float currentRatio = static_cast<float> (currentWidth) /
+ static_cast<float> (currentHeight);
+ auto currentSize = currentWidth * currentHeight;
+ auto oldSize = (*depthWidth) * (*depthHeight);
+ if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
+ *depthWidth = currentWidth;
+ *depthHeight = currentHeight;
+ }
+ }
+ }
+
+ return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
+}
+
+void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
+ if (depthSizes == nullptr) {
+ return;
+ }
+
+ auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ if (entry.count > 0) {
+ // Depth stream dimensions have four int32_t components
+ // (pixelformat, width, height, type)
+ size_t entryCount = entry.count / 4;
+ depthSizes->reserve(entryCount);
+ for (size_t i = 0; i < entry.count; i += 4) {
+ if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
+ (entry.data.i32[i+3] ==
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
+ depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
+ entry.data.i32[i+2]));
+ }
+ }
+ }
+}
+
+status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+ if (compositeOutput == nullptr) {
+ return BAD_VALUE;
+ }
+
+ std::vector<std::tuple<size_t, size_t>> depthSizes;
+ getSupportedDepthSizes(ch, &depthSizes);
+ if (depthSizes.empty()) {
+ ALOGE("%s: No depth stream configurations present", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
+ &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ return ret;
+ }
+
+ compositeOutput->clear();
+ compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+
+ // Jpeg/Blob stream info
+ (*compositeOutput)[0].dataSpace = kJpegDataSpace;
+ (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ // Depth stream info
+ (*compositeOutput)[1].width = depthWidth;
+ (*compositeOutput)[1].height = depthHeight;
+ (*compositeOutput)[1].format = kDepthMapPixelFormat;
+ (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
+ (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ return NO_ERROR;
+}
+
+std::unique_ptr<ImagingModel> DepthCompositeStream::getImagingModel() {
+ // It is not possible to generate an imaging model without intrinsic calibration.
+ if (mIntrinsicCalibration.size() != 5) {
+ return nullptr;
+ }
+
+ // The camera intrinsic calibration layout is as follows:
+ // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
+ const dynamic_depth::Point<double> focalLength(mIntrinsicCalibration[0],
+ mIntrinsicCalibration[1]);
+ const Dimension imageSize(mBlobWidth, mBlobHeight);
+ ImagingModelParams params(focalLength, imageSize);
+ params.principal_point.x = mIntrinsicCalibration[2];
+ params.principal_point.y = mIntrinsicCalibration[3];
+ params.skew = mIntrinsicCalibration[4];
+
+ // The camera lens distortion contains the following lens correction coefficients.
+ // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
+ if (mLensDistortion.size() == 5) {
+ // According to the specification, the lens distortion coefficients should be ordered
+ // as [1, kappa_4, kappa_1, kappa_5, kappa_2, 0, kappa_3, 0]
+ float distortionData[] = {1.f, mLensDistortion[3], mLensDistortion[0], mLensDistortion[4],
+ mLensDistortion[1], 0.f, mLensDistortion[2], 0.f};
+ auto distortionDataLength = sizeof(distortionData) / sizeof(distortionData[0]);
+ params.distortion.reserve(distortionDataLength);
+ params.distortion.insert(params.distortion.end(), distortionData,
+ distortionData + distortionDataLength);
+ }
+
+ return ImagingModel::FromData(params);
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
new file mode 100644
index 0000000..129d538
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+
+#include <dynamic_depth/imaging_model.h>
+#include <dynamic_depth/depth_map.h>
+
+#include <gui/IProducerListener.h>
+#include <gui/CpuConsumer.h>
+
+#include "CompositeStream.h"
+
+using dynamic_depth::DepthMap;
+using dynamic_depth::Item;
+using dynamic_depth::ImagingModel;
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class DepthCompositeStream : public CompositeStream, public Thread,
+ public CpuConsumer::FrameAvailableListener {
+
+public:
+ DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ ~DepthCompositeStream() override;
+
+ static bool isDepthCompositeStream(const sp<Surface> &surface);
+
+ // CompositeStream overrides
+ status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ status_t deleteInternalStreams() override;
+ status_t configureStream() override;
+ status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+ int32_t* /*out*/currentStreamId) override;
+ int getStreamId() override { return mBlobStreamId; }
+
+ // CpuConsumer listener implementation
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // Return stream information about the internal camera streams
+ static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+protected:
+
+ bool threadLoop() override;
+ bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+ void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+ struct InputFrame {
+ CpuConsumer::LockedBuffer depthBuffer;
+ CpuConsumer::LockedBuffer jpegBuffer;
+ CameraMetadata result;
+ bool error;
+ bool errorNotified;
+ int64_t frameNumber;
+
+ InputFrame() : error(false), errorNotified(false), frameNumber(-1) { }
+ };
+
+ // Helper methods
+ static void getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/);
+ static status_t getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+
+ // Dynamic depth processing
+ status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
+ const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize);
+ std::unique_ptr<DepthMap> processDepthMapFrame(const CpuConsumer::LockedBuffer &depthMapBuffer,
+ size_t maxJpegSize, uint8_t jpegQuality,
+ std::vector<std::unique_ptr<Item>>* items /*out*/);
+ std::unique_ptr<ImagingModel> getImagingModel();
+ status_t processInputFrame(const InputFrame &inputFrame);
+
+ // Buffer/Results handling
+ void compilePendingInputLocked();
+ void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+ void releaseInputFramesLocked(int64_t currentTs);
+
+ // Find the first complete and valid frame with the smallest timestamp
+ bool getNextReadyInputLocked(int64_t *currentTs /*inout*/);
+
+ // Find the failing frame with the smallest timestamp and return its frame number
+ int64_t getNextFailingInputLocked(int64_t *currentTs /*inout*/);
+
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ static const auto kDepthMapPixelFormat = HAL_PIXEL_FORMAT_Y16;
+ static const auto kDepthMapDataSpace = HAL_DATASPACE_DEPTH;
+ static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+
+ struct ProducerListener : public BnProducerListener {
+ // ProducerListener implementation
+ void onBufferReleased() override { /*No impl. for now*/ };
+ };
+
+ int mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
+ size_t mBlobWidth, mBlobHeight;
+ sp<CpuConsumer> mBlobConsumer, mDepthConsumer;
+ bool mDepthBufferAcquired, mBlobBufferAcquired;
+ sp<Surface> mDepthSurface, mBlobSurface, mOutputSurface;
+ sp<ProducerListener> mProducerListener;
+
+ ssize_t mMaxJpegSize;
+ std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
+ std::vector<float> mIntrinsicCalibration, mLensDistortion;
+ bool mIsLogicalCamera;
+
+ // Keep all incoming Depth buffer timestamps pending further processing.
+ std::vector<int64_t> mInputDepthBuffers;
+
+ // Keep all incoming Jpeg/Blob buffer timestamps pending further processing.
+ std::vector<int64_t> mInputJpegBuffers;
+
+ // Map of all input frames pending further processing.
+ std::unordered_map<int64_t, InputFrame> mPendingInputFrames;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif