/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Camera3-DepthCompositeStream"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0

#include "api1/client2/JpegProcessor.h"
#include "common/CameraProviderManager.h"
#include "utils/SessionConfigurationUtils.h"
#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include "DepthCompositeStream.h"

namespace android {
namespace camera3 {

DepthCompositeStream::DepthCompositeStream(sp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegSize(-1),
        mIsLogicalCamera(false) {
    if (device != nullptr) {
        CameraMetadata staticInfo = device->info();
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
        if (SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo)) {
            getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
        }
    }
}

DepthCompositeStream::~DepthCompositeStream() {
    mBlobConsumer.clear();
    mBlobSurface.clear();
    mBlobStreamId = -1;
    mBlobSurfaceId = -1;
    mDepthConsumer.clear();
    mDepthSurface.clear();
    mDepthConsumer = nullptr;
    mDepthSurface = nullptr;
}

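// Move any newly arrived jpeg/depth buffer timestamps and capture results into
// 'mPendingInputFrames', and flag entries whose frame numbers were reported as failed.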
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}

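// Scan 'mPendingInputFrames' for the entry with the smallest timestamp (below the incoming
// '*currentTs') that has both the jpeg and depth buffers and no error.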
bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
    if (currentTs == nullptr) {
        return false;
    }

    bool newInputAvailable = false;
    for (const auto& it : mPendingInputFrames) {
        if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
                (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
            *currentTs = it.first;
            newInputAvailable = true;
        }
    }

    return newInputAvailable;
}

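// Find the pending frame with the smallest timestamp (below the incoming '*currentTs') that
// failed and has not yet been reported to the client. Returns its frame number, or -1.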
int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
    int64_t ret = -1;
    if (currentTs == nullptr) {
        return ret;
    }

    for (const auto& it : mPendingInputFrames) {
        if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
            *currentTs = it.first;
            ret = it.second.frameNumber;
        }
    }

    return ret;
}

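// Combine the main jpeg buffer and the depth map of a completed input frame into a single
// dynamic depth photo blob and queue the result to the client-facing output surface.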
status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
    status_t res;
    sp<ANativeWindow> outputANW = mOutputSurface;
    ANativeWindowBuffer *anb;
    int fenceFd;
    void *dstBuffer;

    auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
            inputFrame.jpegBuffer.width);
    if (jpegSize == 0) {
        ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
        jpegSize = inputFrame.jpegBuffer.width;
    }

    size_t maxDepthJpegSize;
    if (mMaxJpegSize > 0) {
        maxDepthJpegSize = mMaxJpegSize;
    } else {
        maxDepthJpegSize = std::max<size_t> (jpegSize,
                inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
    }
    uint8_t jpegQuality = 100;
    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
    if (entry.count > 0) {
        jpegQuality = entry.data.u8[0];
    }

    // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
    // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
    // max jpeg size.
    size_t finalJpegBufferSize = maxDepthJpegSize * 3;

    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
        return res;
    }

    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
    if (res != OK) {
        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
                res);
        return res;
    }

    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
    if (res != OK) {
        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
        ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
                gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return BAD_VALUE;
    }

    DepthPhotoInputFrame depthPhoto;
    depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
    depthPhoto.mMainJpegWidth = mBlobWidth;
    depthPhoto.mMainJpegHeight = mBlobHeight;
    depthPhoto.mMainJpegSize = jpegSize;
    depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
    depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
    depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
    depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
    depthPhoto.mJpegQuality = jpegQuality;
    depthPhoto.mIsLogical = mIsLogicalCamera;
    depthPhoto.mMaxJpegSize = maxDepthJpegSize;
    // The camera intrinsic calibration layout is as follows:
    // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
    if (mIntrinsicCalibration.size() == 5) {
        memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
                sizeof(depthPhoto.mIntrinsicCalibration));
        depthPhoto.mIsIntrinsicCalibrationValid = 1;
    } else {
        depthPhoto.mIsIntrinsicCalibrationValid = 0;
    }
    // The camera lens distortion contains the following lens correction coefficients.
    // [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5]
    if (mLensDistortion.size() == 5) {
        memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
                sizeof(depthPhoto.mLensDistortion));
        depthPhoto.mIsLensDistortionValid = 1;
    } else {
        depthPhoto.mIsLensDistortionValid = 0;
    }
    entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
    if (entry.count > 0) {
        // The camera jpeg orientation values must be within [0, 90, 180, 270].
        switch (entry.data.i32[0]) {
            case 0:
            case 90:
            case 180:
            case 270:
                depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
                break;
            default:
                ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
                        __FUNCTION__, entry.data.i32[0]);
        }
    }

    size_t actualJpegSize = 0;
    res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
    if (res != 0) {
        ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    size_t finalJpegSize = actualJpegSize + sizeof(struct camera_jpeg_blob);
    if (finalJpegSize > finalJpegBufferSize) {
        ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return NO_MEMORY;
    }

    res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
    if (res != OK) {
        ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
                getStreamId(), strerror(-res), res);
        return res;
    }

    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
            (gb->getWidth() - sizeof(struct camera_jpeg_blob));
    struct camera_jpeg_blob *blob = reinterpret_cast<struct camera_jpeg_blob*> (header);
    blob->jpeg_blob_id = CAMERA_JPEG_BLOB_ID;
    blob->jpeg_size = actualJpegSize;
    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);

    return res;
}

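// Unlock any acquired buffers of the given input frame and, if the frame (or the stream)
// is in an error state, notify the client once about the buffer error.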
void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
    if (inputFrame == nullptr) {
        return;
    }

    if (inputFrame->depthBuffer.data != nullptr) {
        mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
        inputFrame->depthBuffer.data = nullptr;
        mDepthBufferAcquired = false;
    }

    if (inputFrame->jpegBuffer.data != nullptr) {
        mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
        inputFrame->jpegBuffer.data = nullptr;
        mBlobBufferAcquired = false;
    }

    if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
        //TODO: Figure out correct requestId
        notifyError(inputFrame->frameNumber, -1 /*requestId*/);
        inputFrame->errorNotified = true;
    }
}

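// Release and erase all pending input frames with timestamps up to and including 'currentTs'.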
void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
    auto it = mPendingInputFrames.begin();
    while (it != mPendingInputFrames.end()) {
        if (it->first <= currentTs) {
            releaseInputFrameLocked(&it->second);
            it = mPendingInputFrames.erase(it);
        } else {
            it++;
        }
    }
}

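// Main processing loop: wait until a frame has both internal buffers available, run the depth
// photo composition on it, and then release all pending frames up to that timestamp.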
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}

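// A surface belongs to a depth composite stream when it is configured as a BLOB stream with the
// dynamic depth dataspace.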
bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
    ANativeWindow *anw = surface.get();
    status_t err;
    int format;
    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
        String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    int dataspace;
    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
        String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
                err);
        ALOGE("%s: %s", __FUNCTION__, msg.string());
        return false;
    }

    if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
        return true;
    }

    return false;
}

static bool setContains(const std::unordered_set<int32_t>& containerSet, int32_t value) {
    return containerSet.find(value) != containerSet.end();
}

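// Select the depth stream size matching the requested blob dimensions for the sensor pixel modes
// in use. When both default and maximum resolution modes are requested, the matching depth sizes
// must agree.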
status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>> &depthSizes,
        const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        size_t *depthWidth, size_t *depthHeight) {
    if (depthWidth == nullptr || depthHeight == nullptr) {
        return BAD_VALUE;
    }
    size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
    bool hasDefaultSensorPixelMode =
            setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);

    bool hasMaximumResolutionSensorPixelMode =
            setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);

    if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
        ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
                __FUNCTION__);
        return BAD_VALUE;
    }

    if (hasDefaultSensorPixelMode) {
        auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
                &chosenDepthHeight);
        if (ret != OK) {
            ALOGE("%s: No matching depth stream size found", __FUNCTION__);
            return ret;
        }
    }

    if (hasMaximumResolutionSensorPixelMode) {
        size_t depthWidth = 0, depthHeight = 0;
        auto ret = getMatchingDepthSize(width, height,
                depthSizesMaximumResolution, &depthWidth, &depthHeight);
        if (ret != OK) {
            ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
            return ret;
        }
        // Both matching depth sizes should be the same.
        if (chosenDepthWidth != 0 && chosenDepthWidth != depthWidth &&
                chosenDepthHeight != depthHeight) {
            ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
                    " have matching depth sizes", __FUNCTION__);
            return BAD_VALUE;
        }
        if (chosenDepthWidth == 0) {
            chosenDepthWidth = depthWidth;
            chosenDepthHeight = depthHeight;
        }
    }
    *depthWidth = chosenDepthWidth;
    *depthHeight = chosenDepthHeight;
    return OK;
}

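// Create the two internal streams backing the composite stream: a jpeg/blob stream and a depth
// map stream, both consumed by CPU consumers owned by this class.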
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        const std::unordered_set<int32_t> &sensorPixelModesUsed,
        std::vector<int> *surfaceIds,
        int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    size_t depthWidth, depthHeight;
    auto ret =
            checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
                    mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
                    &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
            &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}

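// Connect the client-facing output surface, propagate buffer format and count requirements, and
// start the processing thread. Subsequent calls are no-ops while the thread is running.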
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
            &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
            anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}

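// Stop the processing thread, release the internal depth stream, and disconnect the client-facing
// output surface.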
status_t DepthCompositeStream::deleteInternalStreams() {
    // The 'CameraDeviceClient' parent will delete the blob stream
    requestExit();

    auto ret = join();
    if (ret != OK) {
        ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
                strerror(-ret), ret);
    }

    if (mDepthStreamId >= 0) {
        // Camera devices may not be valid after switching to offline mode.
        // In this case, all offline streams including internal composite streams
        // are managed and released by the offline session.
        sp<CameraDeviceBase> device = mDevice.promote();
        if (device.get() != nullptr) {
            ret = device->deleteStream(mDepthStreamId);
        }

        mDepthStreamId = -1;
    }

    if (mOutputSurface != nullptr) {
        mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
        mOutputSurface.clear();
    }

    return ret;
}

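// Buffer available callback from the internal CPU consumers. Queue the buffer timestamp on the
// matching input list (jpeg or depth) and wake up the processing thread.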
void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
    if (item.mDataSpace == kJpegDataSpace) {
        ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
                __func__, ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputJpegBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else if (item.mDataSpace == kDepthMapDataSpace) {
        ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
                ns2ms(item.mTimestamp));

        Mutex::Autolock l(mMutex);
        if (!mErrorState) {
            mInputDepthBuffers.push_back(item.mTimestamp);
            mInputReadyCondition.signal();
        }
    } else {
        ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
    }
}

status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
        Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
    if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
        outputStreamIds->push_back(mDepthStreamId);
    }
    (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);

    if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
        outputStreamIds->push_back(mBlobStreamId);
    }
    (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);

    if (currentStreamId != nullptr) {
        *currentStreamId = mBlobStreamId;
    }

    return NO_ERROR;
}

status_t DepthCompositeStream::insertCompositeStreamIds(
        std::vector<int32_t>* compositeStreamIds /*out*/) {
    if (compositeStreamIds == nullptr) {
        return BAD_VALUE;
    }

    compositeStreamIds->push_back(mDepthStreamId);
    compositeStreamIds->push_back(mBlobStreamId);

    return OK;
}

void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    eraseResult(resultExtras.frameNumber);
}

bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
    bool ret = false;
    // Buffer errors concerning internal composite streams should not be directly visible to
    // camera clients. They must only receive a single buffer error with the public composite
    // stream id.
    if ((resultExtras.errorStreamId == mDepthStreamId) ||
            (resultExtras.errorStreamId == mBlobStreamId)) {
        flagAnErrorFrameNumber(resultExtras.frameNumber);
        ret = true;
    }

    return ret;
}

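// Pick the depth size matching the requested dimensions: prefer an exact match, otherwise the
// largest supported size whose aspect ratio is within 'kDepthARTolerance' of the request.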
status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
        size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
    if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
        return BAD_VALUE;
    }

    float arTol = CameraProviderManager::kDepthARTolerance;
    *depthWidth = *depthHeight = 0;

    float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
    for (const auto& it : supportedDepthSizes) {
        auto currentWidth = std::get<0>(it);
        auto currentHeight = std::get<1>(it);
        if ((currentWidth == width) && (currentHeight == height)) {
            *depthWidth = width;
            *depthHeight = height;
            break;
        } else {
            float currentRatio = static_cast<float> (currentWidth) /
                    static_cast<float> (currentHeight);
            auto currentSize = currentWidth * currentHeight;
            auto oldSize = (*depthWidth) * (*depthHeight);
            if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
                *depthWidth = currentWidth;
                *depthHeight = currentHeight;
            }
        }
    }

    return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
}

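// Parse the depth stream configurations (default or maximum resolution variant) from the static
// camera characteristics into a list of (width, height) tuples.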
void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
        std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
    if (depthSizes == nullptr) {
        return;
    }

    auto entry = ch.find(
            camera3::SessionConfigurationUtils::getAppropriateModeTag(
                    ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
    if (entry.count > 0) {
        // Depth stream dimensions have four int32_t components
        // (pixelformat, width, height, type)
        size_t entryCount = entry.count / 4;
        depthSizes->reserve(entryCount);
        for (size_t i = 0; i < entry.count; i += 4) {
            if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
                    (entry.data.i32[i+3] ==
                            ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
                depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
                        entry.data.i32[i+2]));
            }
        }
    }
}

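// Derive the internal stream configuration (jpeg/blob plus depth) that corresponds to a requested
// composite output stream.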
status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
        const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
    if (compositeOutput == nullptr) {
        return BAD_VALUE;
    }

    std::vector<std::tuple<size_t, size_t>> depthSizes;
    std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
    getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
    if (depthSizes.empty()) {
        ALOGE("%s: No depth stream configurations present", __FUNCTION__);
        return BAD_VALUE;
    }

    if (SessionConfigurationUtils::isUltraHighResolutionSensor(ch)) {
        getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
        if (depthSizesMaximumResolution.empty()) {
            ALOGE("%s: No depth stream configurations for maximum resolution present",
                    __FUNCTION__);
            return BAD_VALUE;
        }
    }

    size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
    auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
            depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
            &chosenDepthHeight);

    if (ret != OK) {
        ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
        return ret;
    }

    compositeOutput->clear();
    compositeOutput->insert(compositeOutput->end(), 2, streamInfo);

    // Sensor pixel modes should stay the same here. They're already overridden.
    // Jpeg/Blob stream info
    (*compositeOutput)[0].dataSpace = kJpegDataSpace;
    (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    // Depth stream info
    (*compositeOutput)[1].width = chosenDepthWidth;
    (*compositeOutput)[1].height = chosenDepthHeight;
    (*compositeOutput)[1].format = kDepthMapPixelFormat;
    (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
    (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;

    return NO_ERROR;
}

}; // namespace camera3
}; // namespace android