blob: acad8c6a3a8c452d00b04b0c20b77c4413f4fdd6 [file] [log] [blame]
Emilian Peev538c90e2018-12-17 18:03:19 +00001/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Camera3-DepthCompositeStream"
18#define ATRACE_TAG ATRACE_TAG_CAMERA
19//#define LOG_NDEBUG 0
20
21#include "api1/client2/JpegProcessor.h"
22#include "common/CameraProviderManager.h"
Emilian Peev538c90e2018-12-17 18:03:19 +000023#include <gui/Surface.h>
24#include <utils/Log.h>
25#include <utils/Trace.h>
26
27#include "DepthCompositeStream.h"
28
Emilian Peev538c90e2018-12-17 18:03:19 +000029namespace android {
30namespace camera3 {
31
32DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
33 wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
34 CompositeStream(device, cb),
35 mBlobStreamId(-1),
36 mBlobSurfaceId(-1),
37 mDepthStreamId(-1),
38 mDepthSurfaceId(-1),
39 mBlobWidth(0),
40 mBlobHeight(0),
41 mDepthBufferAcquired(false),
42 mBlobBufferAcquired(false),
43 mProducerListener(new ProducerListener()),
44 mMaxJpegSize(-1),
Emilian Peev29e9ec12020-01-02 12:43:50 -080045 mIsLogicalCamera(false) {
Emilian Peev538c90e2018-12-17 18:03:19 +000046 sp<CameraDeviceBase> cameraDevice = device.promote();
47 if (cameraDevice.get() != nullptr) {
48 CameraMetadata staticInfo = cameraDevice->info();
49 auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
50 if (entry.count > 0) {
51 mMaxJpegSize = entry.data.i32[0];
52 } else {
53 ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
54 }
55
56 entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
57 if (entry.count == 5) {
Emilian Peev94c98022019-06-19 09:11:51 -070058 mIntrinsicCalibration.reserve(5);
59 mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
Emilian Peev538c90e2018-12-17 18:03:19 +000060 entry.data.f + 5);
61 } else {
62 ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
63 }
64
65 entry = staticInfo.find(ANDROID_LENS_DISTORTION);
66 if (entry.count == 5) {
67 mLensDistortion.reserve(5);
68 mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
69 } else {
70 ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
71 }
72
73 entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
74 for (size_t i = 0; i < entry.count; ++i) {
75 uint8_t capability = entry.data.u8[i];
76 if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
77 mIsLogicalCamera = true;
78 break;
79 }
80 }
81
82 getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
83 }
84}
85
86DepthCompositeStream::~DepthCompositeStream() {
87 mBlobConsumer.clear(),
88 mBlobSurface.clear(),
89 mBlobStreamId = -1;
90 mBlobSurfaceId = -1;
91 mDepthConsumer.clear();
92 mDepthSurface.clear();
93 mDepthConsumer = nullptr;
94 mDepthSurface = nullptr;
95}
96
// Drains all queued producer-side state (jpeg buffers, depth buffers, capture results, frame
// numbers, error notifications) into 'mPendingInputFrames', keyed by buffer timestamp.
// At most one jpeg and one depth buffer are held locked at a time ('mBlobBufferAcquired' /
// 'mDepthBufferAcquired'). Must be called with 'mMutex' held.
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Acquire pending jpeg buffers, one at a time, until the consumer runs dry or we already
    // hold a locked jpeg buffer.
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            // Flag the frame as failed; it will be cleaned up by releaseInputFramesLocked().
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // If the frame already failed (e.g. its sibling buffer errored out), return the buffer
        // immediately instead of holding it.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same acquisition logic for the depth map buffers.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Match collected capture results (frame number + metadata) to pending frames by timestamp.
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Record frame numbers for frames whose shutter notification arrived.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Propagate buffer errors to the matching pending frames. Errors whose frame is not yet
    // known are kept for a later pass.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
195
196bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
197 if (currentTs == nullptr) {
198 return false;
199 }
200
201 bool newInputAvailable = false;
202 for (const auto& it : mPendingInputFrames) {
203 if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
204 (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
205 *currentTs = it.first;
206 newInputAvailable = true;
207 }
208 }
209
210 return newInputAvailable;
211}
212
213int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
214 int64_t ret = -1;
215 if (currentTs == nullptr) {
216 return ret;
217 }
218
219 for (const auto& it : mPendingInputFrames) {
220 if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
221 *currentTs = it.first;
222 ret = it.second.frameNumber;
223 }
224 }
225
226 return ret;
227}
228
Emilian Peev90a839f2019-10-02 15:12:50 -0700229status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
Emilian Peev538c90e2018-12-17 18:03:19 +0000230 status_t res;
231 sp<ANativeWindow> outputANW = mOutputSurface;
232 ANativeWindowBuffer *anb;
233 int fenceFd;
234 void *dstBuffer;
Emilian Peev538c90e2018-12-17 18:03:19 +0000235
236 auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
237 inputFrame.jpegBuffer.width);
238 if (jpegSize == 0) {
239 ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
240 jpegSize = inputFrame.jpegBuffer.width;
241 }
242
Emilian Peev538c90e2018-12-17 18:03:19 +0000243 size_t maxDepthJpegSize;
244 if (mMaxJpegSize > 0) {
245 maxDepthJpegSize = mMaxJpegSize;
246 } else {
247 maxDepthJpegSize = std::max<size_t> (jpegSize,
248 inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
249 }
250 uint8_t jpegQuality = 100;
251 auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
252 if (entry.count > 0) {
253 jpegQuality = entry.data.u8[0];
254 }
Emilian Peev538c90e2018-12-17 18:03:19 +0000255
Emilian Peevcbf174b2019-01-25 14:38:59 -0800256 // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
257 // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
258 // max jpeg size.
259 size_t finalJpegBufferSize = maxDepthJpegSize * 3;
Emilian Peev538c90e2018-12-17 18:03:19 +0000260
Emilian Peevcbf174b2019-01-25 14:38:59 -0800261 if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
Emilian Peev538c90e2018-12-17 18:03:19 +0000262 != OK) {
263 ALOGE("%s: Unable to configure stream buffer dimensions"
Emilian Peevcbf174b2019-01-25 14:38:59 -0800264 " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
Emilian Peev538c90e2018-12-17 18:03:19 +0000265 return res;
266 }
267
268 res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
269 if (res != OK) {
270 ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
271 res);
272 return res;
273 }
274
275 sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
276 res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
277 if (res != OK) {
278 ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
279 strerror(-res), res);
280 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
281 return res;
282 }
283
Emilian Peevcbf174b2019-01-25 14:38:59 -0800284 if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
Emilian Peev538c90e2018-12-17 18:03:19 +0000285 ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
Emilian Peevcbf174b2019-01-25 14:38:59 -0800286 gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
Emilian Peev538c90e2018-12-17 18:03:19 +0000287 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
288 return BAD_VALUE;
289 }
290
Emilian Peevcbf174b2019-01-25 14:38:59 -0800291 DepthPhotoInputFrame depthPhoto;
292 depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
293 depthPhoto.mMainJpegWidth = mBlobWidth;
294 depthPhoto.mMainJpegHeight = mBlobHeight;
295 depthPhoto.mMainJpegSize = jpegSize;
296 depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
297 depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
298 depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
299 depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
300 depthPhoto.mJpegQuality = jpegQuality;
301 depthPhoto.mIsLogical = mIsLogicalCamera;
302 depthPhoto.mMaxJpegSize = maxDepthJpegSize;
303 // The camera intrinsic calibration layout is as follows:
304 // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
Emilian Peev94c98022019-06-19 09:11:51 -0700305 if (mIntrinsicCalibration.size() == 5) {
306 memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
307 sizeof(depthPhoto.mIntrinsicCalibration));
308 depthPhoto.mIsIntrinsicCalibrationValid = 1;
Emilian Peevcbf174b2019-01-25 14:38:59 -0800309 } else {
Emilian Peev94c98022019-06-19 09:11:51 -0700310 depthPhoto.mIsIntrinsicCalibrationValid = 0;
Emilian Peevcbf174b2019-01-25 14:38:59 -0800311 }
312 // The camera lens distortion contains the following lens correction coefficients.
313 // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
314 if (mLensDistortion.size() == 5) {
315 memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
316 sizeof(depthPhoto.mLensDistortion));
317 depthPhoto.mIsLensDistortionValid = 1;
318 } else {
319 depthPhoto.mIsLensDistortionValid = 0;
320 }
Emilian Peev06af8c92019-02-07 12:34:41 -0800321 entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
322 if (entry.count > 0) {
323 // The camera jpeg orientation values must be within [0, 90, 180, 270].
324 switch (entry.data.i32[0]) {
325 case 0:
326 case 90:
327 case 180:
328 case 270:
329 depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
330 break;
331 default:
332 ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
333 __FUNCTION__, entry.data.i32[0]);
334 }
335 }
Emilian Peevcbf174b2019-01-25 14:38:59 -0800336
337 size_t actualJpegSize = 0;
Emilian Peev29e9ec12020-01-02 12:43:50 -0800338 res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
Emilian Peevcbf174b2019-01-25 14:38:59 -0800339 if (res != 0) {
340 ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
341 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
342 return res;
343 }
344
345 size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
346 if (finalJpegSize > finalJpegBufferSize) {
347 ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
348 outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
349 return NO_MEMORY;
350 }
351
Emilian Peev90a839f2019-10-02 15:12:50 -0700352 res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
353 if (res != OK) {
354 ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
355 getStreamId(), strerror(-res), res);
356 return res;
357 }
358
Emilian Peevcbf174b2019-01-25 14:38:59 -0800359 ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
Emilian Peev538c90e2018-12-17 18:03:19 +0000360 uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
361 (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
362 struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
363 blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
Emilian Peevcbf174b2019-01-25 14:38:59 -0800364 blob->jpeg_size = actualJpegSize;
Emilian Peev538c90e2018-12-17 18:03:19 +0000365 outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
366
367 return res;
368}
369
370void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
371 if (inputFrame == nullptr) {
372 return;
373 }
374
375 if (inputFrame->depthBuffer.data != nullptr) {
376 mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
377 inputFrame->depthBuffer.data = nullptr;
378 mDepthBufferAcquired = false;
379 }
380
381 if (inputFrame->jpegBuffer.data != nullptr) {
382 mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
383 inputFrame->jpegBuffer.data = nullptr;
384 mBlobBufferAcquired = false;
385 }
386
387 if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
388 notifyError(inputFrame->frameNumber);
389 inputFrame->errorNotified = true;
390 }
391}
392
393void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
394 auto it = mPendingInputFrames.begin();
395 while (it != mPendingInputFrames.end()) {
396 if (it->first <= currentTs) {
397 releaseInputFrameLocked(&it->second);
398 it = mPendingInputFrames.erase(it);
399 } else {
400 it++;
401 }
402 }
403}
404
405bool DepthCompositeStream::threadLoop() {
406 int64_t currentTs = INT64_MAX;
407 bool newInputAvailable = false;
408
409 {
410 Mutex::Autolock l(mMutex);
411
412 if (mErrorState) {
413 // In case we landed in error state, return any pending buffers and
414 // halt all further processing.
415 compilePendingInputLocked();
416 releaseInputFramesLocked(currentTs);
417 return false;
418 }
419
420 while (!newInputAvailable) {
421 compilePendingInputLocked();
422 newInputAvailable = getNextReadyInputLocked(&currentTs);
423 if (!newInputAvailable) {
424 auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
425 if (failingFrameNumber >= 0) {
426 // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
427 // possible for two internal stream buffers to fail. In such scenario the
428 // composite stream should notify the client about a stream buffer error only
429 // once and this information is kept within 'errorNotified'.
430 // Any present failed input frames will be removed on a subsequent call to
431 // 'releaseInputFramesLocked()'.
432 releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
433 currentTs = INT64_MAX;
434 }
435
436 auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
437 if (ret == TIMED_OUT) {
438 return true;
439 } else if (ret != OK) {
440 ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
441 strerror(-ret), ret);
442 return false;
443 }
444 }
445 }
446 }
447
Emilian Peev90a839f2019-10-02 15:12:50 -0700448 auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
Emilian Peev538c90e2018-12-17 18:03:19 +0000449 Mutex::Autolock l(mMutex);
450 if (res != OK) {
451 ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
452 currentTs, strerror(-res), res);
453 mPendingInputFrames[currentTs].error = true;
454 }
455
456 releaseInputFramesLocked(currentTs);
457
458 return true;
459}
460
461bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
462 ANativeWindow *anw = surface.get();
463 status_t err;
464 int format;
465 if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
466 String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
467 err);
468 ALOGE("%s: %s", __FUNCTION__, msg.string());
469 return false;
470 }
471
472 int dataspace;
473 if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
474 String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
475 err);
476 ALOGE("%s: %s", __FUNCTION__, msg.string());
477 return false;
478 }
479
480 if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
481 return true;
482 }
483
484 return false;
485}
486
// Creates the two internal device streams backing the composite: a jpeg/blob stream matching
// the client-requested dimensions and a depth map stream of the closest supported size.
// 'consumers[0]' becomes the client-facing output surface; '*id' and '*surfaceIds' report the
// blob stream (the composite's public id). Returns OK on success or the first failing status.
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick the depth size that best matches the blob stream's aspect ratio.
    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Internal jpeg/blob stream: CPU consumer feeding the composite processing thread.
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    // Internal depth map stream, same rotation as the blob stream.
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Listen for capture results/errors on both internal streams.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
558
// Connects and configures the client-facing output surface (blob format, combined buffer
// count from producer + consumer sides) and starts the processing thread. Idempotent while
// the thread is running.
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    // The composed depth photo is delivered as a blob.
    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    // Size the output queue to cover both the internal blob surface's and the client
    // consumer's minimum undequeued buffer requirements.
    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
                    &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    // Start the composite processing thread (threadLoop()).
    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
611
612status_t DepthCompositeStream::deleteInternalStreams() {
613 // The 'CameraDeviceClient' parent will delete the blob stream
614 requestExit();
615
616 auto ret = join();
617 if (ret != OK) {
618 ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
619 strerror(-ret), ret);
620 }
621
622 sp<CameraDeviceBase> device = mDevice.promote();
623 if (!device.get()) {
624 ALOGE("%s: Invalid camera device!", __FUNCTION__);
625 return NO_INIT;
626 }
627
628 if (mDepthStreamId >= 0) {
629 ret = device->deleteStream(mDepthStreamId);
630 mDepthStreamId = -1;
631 }
632
Shuzhen Wang2c545042019-02-07 10:27:35 -0800633 if (mOutputSurface != nullptr) {
634 mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
635 mOutputSurface.clear();
636 }
637
Emilian Peev538c90e2018-12-17 18:03:19 +0000638 return ret;
639}
640
641void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
642 if (item.mDataSpace == kJpegDataSpace) {
643 ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
644 __func__, ns2ms(item.mTimestamp));
645
646 Mutex::Autolock l(mMutex);
647 if (!mErrorState) {
648 mInputJpegBuffers.push_back(item.mTimestamp);
649 mInputReadyCondition.signal();
650 }
651 } else if (item.mDataSpace == kDepthMapDataSpace) {
652 ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
653 ns2ms(item.mTimestamp));
654
655 Mutex::Autolock l(mMutex);
656 if (!mErrorState) {
657 mInputDepthBuffers.push_back(item.mTimestamp);
658 mInputReadyCondition.signal();
659 }
660 } else {
661 ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
662 }
663}
664
665status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
666 Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
667 if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
668 (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
669 outputStreamIds->push_back(mDepthStreamId);
670 }
671 (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
672
673 if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
674 (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
675 outputStreamIds->push_back(mBlobStreamId);
676 }
677 (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
678
679 if (currentStreamId != nullptr) {
680 *currentStreamId = mBlobStreamId;
681 }
682
683 return NO_ERROR;
684}
685
Emilian Peev4697b642019-11-19 17:11:14 -0800686status_t DepthCompositeStream::insertCompositeStreamIds(
687 std::vector<int32_t>* compositeStreamIds /*out*/) {
688 if (compositeStreamIds == nullptr) {
689 return BAD_VALUE;
690 }
691
692 compositeStreamIds->push_back(mDepthStreamId);
693 compositeStreamIds->push_back(mBlobStreamId);
694
695 return OK;
696}
697
// Capture-result error callback from the device.
void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
    // Processing can continue even in case of result errors.
    // At the moment depth composite stream processing relies mainly on static camera
    // characteristics data. The actual result data can be used for the jpeg quality but
    // in case it is absent we can default to maximum.
    // Drop any cached result state for this frame so it is not matched with buffers later.
    eraseResult(resultExtras.frameNumber);
}
705
706bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
707 bool ret = false;
708 // Buffer errors concerning internal composite streams should not be directly visible to
709 // camera clients. They must only receive a single buffer error with the public composite
710 // stream id.
711 if ((resultExtras.errorStreamId == mDepthStreamId) ||
712 (resultExtras.errorStreamId == mBlobStreamId)) {
713 flagAnErrorFrameNumber(resultExtras.frameNumber);
714 ret = true;
715 }
716
717 return ret;
718}
719
720status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
721 const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
722 size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
723 if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
724 return BAD_VALUE;
725 }
726
727 float arTol = CameraProviderManager::kDepthARTolerance;
728 *depthWidth = *depthHeight = 0;
729
730 float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
731 for (const auto& it : supporedDepthSizes) {
732 auto currentWidth = std::get<0>(it);
733 auto currentHeight = std::get<1>(it);
734 if ((currentWidth == width) && (currentHeight == height)) {
735 *depthWidth = width;
736 *depthHeight = height;
737 break;
738 } else {
739 float currentRatio = static_cast<float> (currentWidth) /
740 static_cast<float> (currentHeight);
741 auto currentSize = currentWidth * currentHeight;
742 auto oldSize = (*depthWidth) * (*depthHeight);
743 if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
744 *depthWidth = currentWidth;
745 *depthHeight = currentHeight;
746 }
747 }
748 }
749
750 return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
751}
752
753void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
754 std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
755 if (depthSizes == nullptr) {
756 return;
757 }
758
759 auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
760 if (entry.count > 0) {
761 // Depth stream dimensions have four int32_t components
762 // (pixelformat, width, height, type)
763 size_t entryCount = entry.count / 4;
764 depthSizes->reserve(entryCount);
765 for (size_t i = 0; i < entry.count; i += 4) {
766 if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
767 (entry.data.i32[i+3] ==
768 ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
769 depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
770 entry.data.i32[i+2]));
771 }
772 }
773 }
774}
775
776status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
777 const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
778 if (compositeOutput == nullptr) {
779 return BAD_VALUE;
780 }
781
782 std::vector<std::tuple<size_t, size_t>> depthSizes;
783 getSupportedDepthSizes(ch, &depthSizes);
784 if (depthSizes.empty()) {
785 ALOGE("%s: No depth stream configurations present", __FUNCTION__);
786 return BAD_VALUE;
787 }
788
789 size_t depthWidth, depthHeight;
790 auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
791 &depthHeight);
792 if (ret != OK) {
793 ALOGE("%s: No matching depth stream size found", __FUNCTION__);
794 return ret;
795 }
796
797 compositeOutput->clear();
798 compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
799
800 // Jpeg/Blob stream info
801 (*compositeOutput)[0].dataSpace = kJpegDataSpace;
802 (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
803
804 // Depth stream info
805 (*compositeOutput)[1].width = depthWidth;
806 (*compositeOutput)[1].height = depthHeight;
807 (*compositeOutput)[1].format = kDepthMapPixelFormat;
808 (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
809 (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
810
811 return NO_ERROR;
812}
813
Emilian Peev538c90e2018-12-17 18:03:19 +0000814}; // namespace camera3
815}; // namespace android