/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
|  | 16 |  | 
|  | 17 | #define LOG_TAG "Camera3-DepthCompositeStream" | 
|  | 18 | #define ATRACE_TAG ATRACE_TAG_CAMERA | 
|  | 19 | //#define LOG_NDEBUG 0 | 
|  | 20 |  | 
|  | 21 | #include "api1/client2/JpegProcessor.h" | 
|  | 22 | #include "common/CameraProviderManager.h" | 
| Emilian Peev | cbf174b | 2019-01-25 14:38:59 -0800 | [diff] [blame] | 23 | #include "dlfcn.h" | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 24 | #include <gui/Surface.h> | 
|  | 25 | #include <utils/Log.h> | 
|  | 26 | #include <utils/Trace.h> | 
|  | 27 |  | 
|  | 28 | #include "DepthCompositeStream.h" | 
|  | 29 |  | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 30 | namespace android { | 
|  | 31 | namespace camera3 { | 
|  | 32 |  | 
// Constructor: caches the static camera characteristics needed for depth photo
// composition (max jpeg size, intrinsic calibration, lens distortion, logical
// camera capability, supported depth sizes) and links dynamically against the
// depth photo processing library.
DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegSize(-1),  // -1 means "absent"; processInputFrame() falls back to an estimate
        mIsLogicalCamera(false),
        mDepthPhotoLibHandle(nullptr),
        mDepthPhotoProcess(nullptr) {
    sp<CameraDeviceBase> cameraDevice = device.promote();
    if (cameraDevice.get() != nullptr) {
        CameraMetadata staticInfo = cameraDevice->info();
        // Maximum jpeg size is optional; used later to size the output blob buffer.
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        // Intrinsic calibration is only cached when it has the expected 5 entries
        // ([focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]);
        // otherwise the vector stays empty and the depth photo marks it invalid.
        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        // Lens distortion coefficients: same 5-entry contract as above.
        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        // A logical multi-camera is flagged so the depth photo processor can be told.
        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);

        // Resolve the depth photo processing entry point at runtime. Failure is
        // logged but not fatal here; configureStream() rejects the stream later
        // if either handle is still null.
        mDepthPhotoLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
        if (mDepthPhotoLibHandle != nullptr) {
            mDepthPhotoProcess = reinterpret_cast<camera3::process_depth_photo_frame> (
                    dlsym(mDepthPhotoLibHandle, camera3::kDepthPhotoProcessFunction));
            if (mDepthPhotoProcess == nullptr) {
                ALOGE("%s: Failed to link to depth photo process function: %s", __FUNCTION__,
                        dlerror());
            }
        } else {
            ALOGE("%s: Failed to link to depth photo library: %s", __FUNCTION__, dlerror());
        }

    }
}
|  | 101 |  | 
|  | 102 | DepthCompositeStream::~DepthCompositeStream() { | 
|  | 103 | mBlobConsumer.clear(), | 
|  | 104 | mBlobSurface.clear(), | 
|  | 105 | mBlobStreamId = -1; | 
|  | 106 | mBlobSurfaceId = -1; | 
|  | 107 | mDepthConsumer.clear(); | 
|  | 108 | mDepthSurface.clear(); | 
|  | 109 | mDepthConsumer = nullptr; | 
|  | 110 | mDepthSurface = nullptr; | 
| Emilian Peev | cbf174b | 2019-01-25 14:38:59 -0800 | [diff] [blame] | 111 | if (mDepthPhotoLibHandle != nullptr) { | 
|  | 112 | dlclose(mDepthPhotoLibHandle); | 
|  | 113 | mDepthPhotoLibHandle = nullptr; | 
|  | 114 | } | 
|  | 115 | mDepthPhotoProcess = nullptr; | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 116 | } | 
|  | 117 |  | 
// Drains all pending producer-side state into 'mPendingInputFrames', keyed by
// buffer timestamp: newly arrived jpeg and depth buffers, collected capture
// results, frame number mappings, and buffer error notifications.
// Must be called with 'mMutex' held (enforced by callers; see threadLoop()).
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Acquire at most one jpeg buffer at a time ('mBlobBufferAcquired' gates
    // this since the consumer was created with maxLockedBuffers == 1).
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        // The locked buffer is matched by its own timestamp below; a mismatch
        // with the queued timestamp is only worth a warning.
        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // If this frame already failed, return the buffer immediately instead
        // of holding it for processing.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same acquisition logic for the depth map buffers.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Attach capture results (frame number + metadata) to their frames.
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Frame number map: timestamp -> frame number pairs recorded at request time.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Propagate buffer errors onto the matching pending frames. Frame numbers
    // that cannot be matched yet are kept for a later pass.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
|  | 216 |  | 
|  | 217 | bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) { | 
|  | 218 | if (currentTs == nullptr) { | 
|  | 219 | return false; | 
|  | 220 | } | 
|  | 221 |  | 
|  | 222 | bool newInputAvailable = false; | 
|  | 223 | for (const auto& it : mPendingInputFrames) { | 
|  | 224 | if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) && | 
|  | 225 | (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) { | 
|  | 226 | *currentTs = it.first; | 
|  | 227 | newInputAvailable = true; | 
|  | 228 | } | 
|  | 229 | } | 
|  | 230 |  | 
|  | 231 | return newInputAvailable; | 
|  | 232 | } | 
|  | 233 |  | 
|  | 234 | int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) { | 
|  | 235 | int64_t ret = -1; | 
|  | 236 | if (currentTs == nullptr) { | 
|  | 237 | return ret; | 
|  | 238 | } | 
|  | 239 |  | 
|  | 240 | for (const auto& it : mPendingInputFrames) { | 
|  | 241 | if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) { | 
|  | 242 | *currentTs = it.first; | 
|  | 243 | ret = it.second.frameNumber; | 
|  | 244 | } | 
|  | 245 | } | 
|  | 246 |  | 
|  | 247 | return ret; | 
|  | 248 | } | 
|  | 249 |  | 
// Composes one depth photo: runs the dynamically loaded depth photo processor
// over the input jpeg + depth map pair and queues the resulting blob (with a
// trailing camera3_jpeg_blob transfer header) to the client output surface.
// 'ts' is the capture timestamp propagated to the output buffer.
// Returns OK on success; on failure the dequeued output buffer is cancelled.
status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
    status_t res;
    sp<ANativeWindow> outputANW = mOutputSurface;
    ANativeWindowBuffer *anb;
    int fenceFd;
    void *dstBuffer;

    // Locate the actual compressed size inside the blob-width buffer; fall back
    // to the full buffer width if no jpeg marker is found.
    auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
            inputFrame.jpegBuffer.width);
    if (jpegSize == 0) {
        ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
        jpegSize = inputFrame.jpegBuffer.width;
    }

    // Worst-case size of a single jpeg: the advertised maximum, or an estimate
    // from the input sizes when the characteristics did not provide one.
    size_t maxDepthJpegSize;
    if (mMaxJpegSize > 0) {
        maxDepthJpegSize = mMaxJpegSize;
    } else {
        maxDepthJpegSize = std::max<size_t> (jpegSize,
                inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
    }
    // Default to maximum quality when the request did not specify one.
    uint8_t jpegQuality = 100;
    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
    if (entry.count > 0) {
        jpegQuality = entry.data.u8[0];
    }

    // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
    // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
    // max jpeg size.
    size_t finalJpegBufferSize = maxDepthJpegSize * 3;

    // Blob buffers are 1-D: width == byte size, height == 1.
    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
        return res;
    }

    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
    if (res != OK) {
        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
                res);
        return res;
    }

    // CPU-lock the output buffer; lockAsync consumes the dequeue fence.
    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
    if (res != OK) {
        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
        ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
                gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return BAD_VALUE;
    }

    // Populate the processor input from the cached characteristics and the
    // current frame's buffers/metadata.
    DepthPhotoInputFrame depthPhoto;
    depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
    depthPhoto.mMainJpegWidth = mBlobWidth;
    depthPhoto.mMainJpegHeight = mBlobHeight;
    depthPhoto.mMainJpegSize = jpegSize;
    depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
    depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
    depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
    depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
    depthPhoto.mJpegQuality = jpegQuality;
    depthPhoto.mIsLogical = mIsLogicalCamera;
    depthPhoto.mMaxJpegSize = maxDepthJpegSize;
    // The camera intrinsic calibration layout is as follows:
    // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
    if (mIntrinsicCalibration.size() == 5) {
        memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
                sizeof(depthPhoto.mIntrinsicCalibration));
        depthPhoto.mIsIntrinsicCalibrationValid = 1;
    } else {
        depthPhoto.mIsIntrinsicCalibrationValid = 0;
    }
    // The camera lens distortion contains the following lens correction coefficients.
    // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
    if (mLensDistortion.size() == 5) {
        memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
                sizeof(depthPhoto.mLensDistortion));
        depthPhoto.mIsLensDistortionValid = 1;
    } else {
        depthPhoto.mIsLensDistortionValid = 0;
    }
    entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
    if (entry.count > 0) {
        // The camera jpeg orientation values must be within [0, 90, 180, 270].
        switch (entry.data.i32[0]) {
            case 0:
            case 90:
            case 180:
            case 270:
                depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
                break;
            default:
                ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
                        __FUNCTION__, entry.data.i32[0]);
        }
    }

    // Run the dynamically linked depth photo processor directly into the
    // CPU-locked output buffer.
    size_t actualJpegSize = 0;
    res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
    if (res != 0) {
        ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    // The transfer header must fit after the payload inside the same buffer.
    size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
    if (finalJpegSize > finalJpegBufferSize) {
        ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return NO_MEMORY;
    }

    res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
                getStreamId(), strerror(-res), res);
        return res;
    }

    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
    // The blob header lives at the very end of the allocated buffer, as
    // required by the camera blob transfer convention.
    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
            (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
    struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
    blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
    blob->jpeg_size = actualJpegSize;
    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);

    return res;
}
|  | 390 |  | 
|  | 391 | void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) { | 
|  | 392 | if (inputFrame == nullptr) { | 
|  | 393 | return; | 
|  | 394 | } | 
|  | 395 |  | 
|  | 396 | if (inputFrame->depthBuffer.data != nullptr) { | 
|  | 397 | mDepthConsumer->unlockBuffer(inputFrame->depthBuffer); | 
|  | 398 | inputFrame->depthBuffer.data = nullptr; | 
|  | 399 | mDepthBufferAcquired = false; | 
|  | 400 | } | 
|  | 401 |  | 
|  | 402 | if (inputFrame->jpegBuffer.data != nullptr) { | 
|  | 403 | mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer); | 
|  | 404 | inputFrame->jpegBuffer.data = nullptr; | 
|  | 405 | mBlobBufferAcquired = false; | 
|  | 406 | } | 
|  | 407 |  | 
|  | 408 | if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) { | 
|  | 409 | notifyError(inputFrame->frameNumber); | 
|  | 410 | inputFrame->errorNotified = true; | 
|  | 411 | } | 
|  | 412 | } | 
|  | 413 |  | 
|  | 414 | void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) { | 
|  | 415 | auto it = mPendingInputFrames.begin(); | 
|  | 416 | while (it != mPendingInputFrames.end()) { | 
|  | 417 | if (it->first <= currentTs) { | 
|  | 418 | releaseInputFrameLocked(&it->second); | 
|  | 419 | it = mPendingInputFrames.erase(it); | 
|  | 420 | } else { | 
|  | 421 | it++; | 
|  | 422 | } | 
|  | 423 | } | 
|  | 424 | } | 
|  | 425 |  | 
|  | 426 | bool DepthCompositeStream::threadLoop() { | 
|  | 427 | int64_t currentTs = INT64_MAX; | 
|  | 428 | bool newInputAvailable = false; | 
|  | 429 |  | 
|  | 430 | { | 
|  | 431 | Mutex::Autolock l(mMutex); | 
|  | 432 |  | 
|  | 433 | if (mErrorState) { | 
|  | 434 | // In case we landed in error state, return any pending buffers and | 
|  | 435 | // halt all further processing. | 
|  | 436 | compilePendingInputLocked(); | 
|  | 437 | releaseInputFramesLocked(currentTs); | 
|  | 438 | return false; | 
|  | 439 | } | 
|  | 440 |  | 
|  | 441 | while (!newInputAvailable) { | 
|  | 442 | compilePendingInputLocked(); | 
|  | 443 | newInputAvailable = getNextReadyInputLocked(¤tTs); | 
|  | 444 | if (!newInputAvailable) { | 
|  | 445 | auto failingFrameNumber = getNextFailingInputLocked(¤tTs); | 
|  | 446 | if (failingFrameNumber >= 0) { | 
|  | 447 | // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is | 
|  | 448 | // possible for two internal stream buffers to fail. In such scenario the | 
|  | 449 | // composite stream should notify the client about a stream buffer error only | 
|  | 450 | // once and this information is kept within 'errorNotified'. | 
|  | 451 | // Any present failed input frames will be removed on a subsequent call to | 
|  | 452 | // 'releaseInputFramesLocked()'. | 
|  | 453 | releaseInputFrameLocked(&mPendingInputFrames[currentTs]); | 
|  | 454 | currentTs = INT64_MAX; | 
|  | 455 | } | 
|  | 456 |  | 
|  | 457 | auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration); | 
|  | 458 | if (ret == TIMED_OUT) { | 
|  | 459 | return true; | 
|  | 460 | } else if (ret != OK) { | 
|  | 461 | ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__, | 
|  | 462 | strerror(-ret), ret); | 
|  | 463 | return false; | 
|  | 464 | } | 
|  | 465 | } | 
|  | 466 | } | 
|  | 467 | } | 
|  | 468 |  | 
| Emilian Peev | 90a839f | 2019-10-02 15:12:50 -0700 | [diff] [blame] | 469 | auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]); | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 470 | Mutex::Autolock l(mMutex); | 
|  | 471 | if (res != OK) { | 
|  | 472 | ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__, | 
|  | 473 | currentTs, strerror(-res), res); | 
|  | 474 | mPendingInputFrames[currentTs].error = true; | 
|  | 475 | } | 
|  | 476 |  | 
|  | 477 | releaseInputFramesLocked(currentTs); | 
|  | 478 |  | 
|  | 479 | return true; | 
|  | 480 | } | 
|  | 481 |  | 
|  | 482 | bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) { | 
|  | 483 | ANativeWindow *anw = surface.get(); | 
|  | 484 | status_t err; | 
|  | 485 | int format; | 
|  | 486 | if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) { | 
|  | 487 | String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err), | 
|  | 488 | err); | 
|  | 489 | ALOGE("%s: %s", __FUNCTION__, msg.string()); | 
|  | 490 | return false; | 
|  | 491 | } | 
|  | 492 |  | 
|  | 493 | int dataspace; | 
|  | 494 | if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) { | 
|  | 495 | String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err), | 
|  | 496 | err); | 
|  | 497 | ALOGE("%s: %s", __FUNCTION__, msg.string()); | 
|  | 498 | return false; | 
|  | 499 | } | 
|  | 500 |  | 
|  | 501 | if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) { | 
|  | 502 | return true; | 
|  | 503 | } | 
|  | 504 |  | 
|  | 505 | return false; | 
|  | 506 | } | 
|  | 507 |  | 
// Creates the two internal camera streams backing the composite: a jpeg (blob)
// stream at the client-requested size and a depth map stream at the closest
// supported depth size. The client's surface in 'consumers[0]' becomes the
// final output. '*id' and '*surfaceIds' report the blob stream to the caller,
// matching the regular createStream() contract.
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick a depth resolution compatible with the requested jpeg resolution.
    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Internal jpeg stream: CPU consumer limited to one locked buffer; frames
    // are delivered via onFrameAvailable().
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        // The client-provided surface receives the final composed depth photo.
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    // Internal depth map stream, analogous setup to the jpeg stream above.
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Register for capture results/errors on both internal streams.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    // Remembered for the depth photo processor input (main jpeg dimensions).
    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
|  | 579 |  | 
// Connects and configures the client output surface (BLOB format, combined
// producer + consumer buffer count) and starts the processing thread.
// Idempotent: returns immediately if the thread is already running.
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    // Without the depth photo processor there is nothing we can produce.
    if ((mDepthPhotoLibHandle == nullptr) || (mDepthPhotoProcess == nullptr)) {
        ALOGE("%s: Depth photo library is not present!", __FUNCTION__);
        return NO_INIT;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    // Size the output queue to cover both the internal blob producer's
    // undequeued requirement and the client consumer's.
    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
                    &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    // Kick off the frame-processing thread (see threadLoop()).
    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
|  | 637 |  | 
|  | 638 | status_t DepthCompositeStream::deleteInternalStreams() { | 
|  | 639 | // The 'CameraDeviceClient' parent will delete the blob stream | 
|  | 640 | requestExit(); | 
|  | 641 |  | 
|  | 642 | auto ret = join(); | 
|  | 643 | if (ret != OK) { | 
|  | 644 | ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__, | 
|  | 645 | strerror(-ret), ret); | 
|  | 646 | } | 
|  | 647 |  | 
|  | 648 | sp<CameraDeviceBase> device = mDevice.promote(); | 
|  | 649 | if (!device.get()) { | 
|  | 650 | ALOGE("%s: Invalid camera device!", __FUNCTION__); | 
|  | 651 | return NO_INIT; | 
|  | 652 | } | 
|  | 653 |  | 
|  | 654 | if (mDepthStreamId >= 0) { | 
|  | 655 | ret = device->deleteStream(mDepthStreamId); | 
|  | 656 | mDepthStreamId = -1; | 
|  | 657 | } | 
|  | 658 |  | 
| Shuzhen Wang | 2c54504 | 2019-02-07 10:27:35 -0800 | [diff] [blame] | 659 | if (mOutputSurface != nullptr) { | 
|  | 660 | mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA); | 
|  | 661 | mOutputSurface.clear(); | 
|  | 662 | } | 
|  | 663 |  | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 664 | return ret; | 
|  | 665 | } | 
|  | 666 |  | 
|  | 667 | void DepthCompositeStream::onFrameAvailable(const BufferItem& item) { | 
|  | 668 | if (item.mDataSpace == kJpegDataSpace) { | 
|  | 669 | ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!", | 
|  | 670 | __func__, ns2ms(item.mTimestamp)); | 
|  | 671 |  | 
|  | 672 | Mutex::Autolock l(mMutex); | 
|  | 673 | if (!mErrorState) { | 
|  | 674 | mInputJpegBuffers.push_back(item.mTimestamp); | 
|  | 675 | mInputReadyCondition.signal(); | 
|  | 676 | } | 
|  | 677 | } else if (item.mDataSpace == kDepthMapDataSpace) { | 
|  | 678 | ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__, | 
|  | 679 | ns2ms(item.mTimestamp)); | 
|  | 680 |  | 
|  | 681 | Mutex::Autolock l(mMutex); | 
|  | 682 | if (!mErrorState) { | 
|  | 683 | mInputDepthBuffers.push_back(item.mTimestamp); | 
|  | 684 | mInputReadyCondition.signal(); | 
|  | 685 | } | 
|  | 686 | } else { | 
|  | 687 | ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace); | 
|  | 688 | } | 
|  | 689 | } | 
|  | 690 |  | 
|  | 691 | status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap, | 
|  | 692 | Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) { | 
|  | 693 | if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) { | 
|  | 694 | (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>(); | 
|  | 695 | outputStreamIds->push_back(mDepthStreamId); | 
|  | 696 | } | 
|  | 697 | (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId); | 
|  | 698 |  | 
|  | 699 | if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) { | 
|  | 700 | (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>(); | 
|  | 701 | outputStreamIds->push_back(mBlobStreamId); | 
|  | 702 | } | 
|  | 703 | (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId); | 
|  | 704 |  | 
|  | 705 | if (currentStreamId != nullptr) { | 
|  | 706 | *currentStreamId = mBlobStreamId; | 
|  | 707 | } | 
|  | 708 |  | 
|  | 709 | return NO_ERROR; | 
|  | 710 | } | 
|  | 711 |  | 
void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    // Drop the pending result entry for this frame so processing doesn't wait on it.
    eraseResult(resultExtras.frameNumber);
}
|  | 719 |  | 
|  | 720 | bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) { | 
|  | 721 | bool ret = false; | 
|  | 722 | // Buffer errors concerning internal composite streams should not be directly visible to | 
|  | 723 | // camera clients. They must only receive a single buffer error with the public composite | 
|  | 724 | // stream id. | 
|  | 725 | if ((resultExtras.errorStreamId == mDepthStreamId) || | 
|  | 726 | (resultExtras.errorStreamId == mBlobStreamId)) { | 
|  | 727 | flagAnErrorFrameNumber(resultExtras.frameNumber); | 
|  | 728 | ret = true; | 
|  | 729 | } | 
|  | 730 |  | 
|  | 731 | return ret; | 
|  | 732 | } | 
|  | 733 |  | 
|  | 734 | status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height, | 
|  | 735 | const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes, | 
|  | 736 | size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) { | 
|  | 737 | if ((depthWidth == nullptr) || (depthHeight == nullptr)) { | 
|  | 738 | return BAD_VALUE; | 
|  | 739 | } | 
|  | 740 |  | 
|  | 741 | float arTol = CameraProviderManager::kDepthARTolerance; | 
|  | 742 | *depthWidth = *depthHeight = 0; | 
|  | 743 |  | 
|  | 744 | float aspectRatio = static_cast<float> (width) / static_cast<float> (height); | 
|  | 745 | for (const auto& it : supporedDepthSizes) { | 
|  | 746 | auto currentWidth = std::get<0>(it); | 
|  | 747 | auto currentHeight = std::get<1>(it); | 
|  | 748 | if ((currentWidth == width) && (currentHeight == height)) { | 
|  | 749 | *depthWidth = width; | 
|  | 750 | *depthHeight = height; | 
|  | 751 | break; | 
|  | 752 | } else { | 
|  | 753 | float currentRatio = static_cast<float> (currentWidth) / | 
|  | 754 | static_cast<float> (currentHeight); | 
|  | 755 | auto currentSize = currentWidth * currentHeight; | 
|  | 756 | auto oldSize = (*depthWidth) * (*depthHeight); | 
|  | 757 | if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) { | 
|  | 758 | *depthWidth = currentWidth; | 
|  | 759 | *depthHeight = currentHeight; | 
|  | 760 | } | 
|  | 761 | } | 
|  | 762 | } | 
|  | 763 |  | 
|  | 764 | return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE; | 
|  | 765 | } | 
|  | 766 |  | 
|  | 767 | void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, | 
|  | 768 | std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) { | 
|  | 769 | if (depthSizes == nullptr) { | 
|  | 770 | return; | 
|  | 771 | } | 
|  | 772 |  | 
|  | 773 | auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS); | 
|  | 774 | if (entry.count > 0) { | 
|  | 775 | // Depth stream dimensions have four int32_t components | 
|  | 776 | // (pixelformat, width, height, type) | 
|  | 777 | size_t entryCount = entry.count / 4; | 
|  | 778 | depthSizes->reserve(entryCount); | 
|  | 779 | for (size_t i = 0; i < entry.count; i += 4) { | 
|  | 780 | if ((entry.data.i32[i] == kDepthMapPixelFormat) && | 
|  | 781 | (entry.data.i32[i+3] == | 
|  | 782 | ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) { | 
|  | 783 | depthSizes->push_back(std::make_tuple(entry.data.i32[i+1], | 
|  | 784 | entry.data.i32[i+2])); | 
|  | 785 | } | 
|  | 786 | } | 
|  | 787 | } | 
|  | 788 | } | 
|  | 789 |  | 
|  | 790 | status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo, | 
|  | 791 | const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) { | 
|  | 792 | if (compositeOutput == nullptr) { | 
|  | 793 | return BAD_VALUE; | 
|  | 794 | } | 
|  | 795 |  | 
|  | 796 | std::vector<std::tuple<size_t, size_t>> depthSizes; | 
|  | 797 | getSupportedDepthSizes(ch, &depthSizes); | 
|  | 798 | if (depthSizes.empty()) { | 
|  | 799 | ALOGE("%s: No depth stream configurations present", __FUNCTION__); | 
|  | 800 | return BAD_VALUE; | 
|  | 801 | } | 
|  | 802 |  | 
|  | 803 | size_t depthWidth, depthHeight; | 
|  | 804 | auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth, | 
|  | 805 | &depthHeight); | 
|  | 806 | if (ret != OK) { | 
|  | 807 | ALOGE("%s: No matching depth stream size found", __FUNCTION__); | 
|  | 808 | return ret; | 
|  | 809 | } | 
|  | 810 |  | 
|  | 811 | compositeOutput->clear(); | 
|  | 812 | compositeOutput->insert(compositeOutput->end(), 2, streamInfo); | 
|  | 813 |  | 
|  | 814 | // Jpeg/Blob stream info | 
|  | 815 | (*compositeOutput)[0].dataSpace = kJpegDataSpace; | 
|  | 816 | (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN; | 
|  | 817 |  | 
|  | 818 | // Depth stream info | 
|  | 819 | (*compositeOutput)[1].width = depthWidth; | 
|  | 820 | (*compositeOutput)[1].height = depthHeight; | 
|  | 821 | (*compositeOutput)[1].format = kDepthMapPixelFormat; | 
|  | 822 | (*compositeOutput)[1].dataSpace = kDepthMapDataSpace; | 
|  | 823 | (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN; | 
|  | 824 |  | 
|  | 825 | return NO_ERROR; | 
|  | 826 | } | 
|  | 827 |  | 
| Emilian Peev | 538c90e | 2018-12-17 18:03:19 +0000 | [diff] [blame] | 828 | }; // namespace camera3 | 
|  | 829 | }; // namespace android |