blob: 2eec0f7f9ca87e3be9b1da0f6ff7e99e4f8eb026 [file] [log] [blame]
Emilian Peev538c90e2018-12-17 18:03:19 +00001/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Camera3-DepthCompositeStream"
18#define ATRACE_TAG ATRACE_TAG_CAMERA
19//#define LOG_NDEBUG 0
20
21#include "api1/client2/JpegProcessor.h"
22#include "common/CameraProviderManager.h"
Emilian Peevcbf174b2019-01-25 14:38:59 -080023#include "dlfcn.h"
Emilian Peev538c90e2018-12-17 18:03:19 +000024#include <gui/Surface.h>
25#include <utils/Log.h>
26#include <utils/Trace.h>
27
28#include "DepthCompositeStream.h"
29
Emilian Peev538c90e2018-12-17 18:03:19 +000030namespace android {
31namespace camera3 {
32
/*
 * Constructor: caches the static camera characteristics that depth photo
 * processing needs later (maximum jpeg size, intrinsic calibration, lens
 * distortion, logical multi-camera capability, supported depth map sizes)
 * and loads the external depth photo processing library.
 */
DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegSize(-1),
        mIsLogicalCamera(false),
        mDepthPhotoLibHandle(nullptr),
        mDepthPhotoProcess(nullptr) {
    sp<CameraDeviceBase> cameraDevice = device.promote();
    if (cameraDevice.get() != nullptr) {
        CameraMetadata staticInfo = cameraDevice->info();
        // Maximum jpeg size bounds the final depth photo buffer; absent
        // entries leave mMaxJpegSize at -1 and a fallback estimate is used
        // during processing.
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        // Intrinsic calibration layout:
        // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mInstrinsicCalibration.reserve(5);
            mInstrinsicCalibration.insert(mInstrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        // Lens distortion correction coefficients (five kappa values).
        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        // Record whether this is a logical multi-camera; forwarded to the
        // depth photo encoder via DepthPhotoInputFrame::mIsLogical.
        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);

        // The depth photo encoder lives in a separate shared library. A
        // missing library is not fatal here; 'configureStream()' rejects
        // the configuration later when the handles are still null.
        mDepthPhotoLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
        if (mDepthPhotoLibHandle != nullptr) {
            mDepthPhotoProcess = reinterpret_cast<camera3::process_depth_photo_frame> (
                    dlsym(mDepthPhotoLibHandle, camera3::kDepthPhotoProcessFunction));
            if (mDepthPhotoProcess == nullptr) {
                ALOGE("%s: Failed to link to depth photo process function: %s", __FUNCTION__,
                        dlerror());
            }
        } else {
            ALOGE("%s: Failed to link to depth photo library: %s", __FUNCTION__, dlerror());
        }

    }
}
101
102DepthCompositeStream::~DepthCompositeStream() {
103 mBlobConsumer.clear(),
104 mBlobSurface.clear(),
105 mBlobStreamId = -1;
106 mBlobSurfaceId = -1;
107 mDepthConsumer.clear();
108 mDepthSurface.clear();
109 mDepthConsumer = nullptr;
110 mDepthSurface = nullptr;
Emilian Peevcbf174b2019-01-25 14:38:59 -0800111 if (mDepthPhotoLibHandle != nullptr) {
112 dlclose(mDepthPhotoLibHandle);
113 mDepthPhotoLibHandle = nullptr;
114 }
115 mDepthPhotoProcess = nullptr;
Emilian Peev538c90e2018-12-17 18:03:19 +0000116}
117
/*
 * Drains all queued notifications into 'mPendingInputFrames' (keyed by
 * buffer timestamp): newly available jpeg and depth buffers, collected
 * capture results, frame number mappings and error frame numbers.
 * Must be called with 'mMutex' held.
 */
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Only one blob buffer may be locked at a time (the consumer was created
    // with maxLockedBuffers == 1), hence the !mBlobBufferAcquired guard.
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // If this frame is already flagged as failed, return the buffer
        // immediately instead of holding it.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same handling for the internal depth map stream.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Attach collected capture results (frame number + metadata) to the
    // matching pending frames.
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Propagate the frame-number-to-timestamp mapping for frames whose
    // results have not arrived yet.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Flag pending frames whose frame number was reported as failed. Frame
    // numbers that cannot be matched yet are kept for a later pass.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
216
217bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
218 if (currentTs == nullptr) {
219 return false;
220 }
221
222 bool newInputAvailable = false;
223 for (const auto& it : mPendingInputFrames) {
224 if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
225 (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
226 *currentTs = it.first;
227 newInputAvailable = true;
228 }
229 }
230
231 return newInputAvailable;
232}
233
234int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
235 int64_t ret = -1;
236 if (currentTs == nullptr) {
237 return ret;
238 }
239
240 for (const auto& it : mPendingInputFrames) {
241 if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
242 *currentTs = it.first;
243 ret = it.second.frameNumber;
244 }
245 }
246
247 return ret;
248}
249
/*
 * Combines one matched pair of main jpeg + depth map buffers into a single
 * dynamic depth photo and queues it on the client output surface.
 *
 * Steps: size the output buffer (worst case three max-size jpegs), dequeue
 * and lock an output buffer, run the externally loaded depth photo encoder,
 * append the mandatory camera3_jpeg_blob transport header at the very end
 * of the buffer, then queue the result.
 *
 * Returns OK on success, otherwise the failing status; on any error after
 * dequeue the buffer is cancelled back to the surface.
 */
status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
    status_t res;
    sp<ANativeWindow> outputANW = mOutputSurface;
    ANativeWindowBuffer *anb;
    int fenceFd;
    void *dstBuffer;

    // Locate the actual compressed payload inside the (possibly padded)
    // blob buffer; fall back to the whole buffer width when not found.
    auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
            inputFrame.jpegBuffer.width);
    if (jpegSize == 0) {
        ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
        jpegSize = inputFrame.jpegBuffer.width;
    }

    // Upper bound for a single jpeg inside the depth photo. When the static
    // ANDROID_JPEG_MAX_SIZE is unavailable (mMaxJpegSize <= 0), estimate
    // from the larger of the main jpeg and an NV-style depth plane size.
    size_t maxDepthJpegSize;
    if (mMaxJpegSize > 0) {
        maxDepthJpegSize = mMaxJpegSize;
    } else {
        maxDepthJpegSize = std::max<size_t> (jpegSize,
                inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
    }
    // Jpeg quality defaults to maximum when absent from the capture result.
    uint8_t jpegQuality = 100;
    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
    if (entry.count > 0) {
        jpegQuality = entry.data.u8[0];
    }

    // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
    // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
    // max jpeg size.
    size_t finalJpegBufferSize = maxDepthJpegSize * 3;

    // Blob buffers are laid out as width x 1.
    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer dimensions"
                " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
        return res;
    }

    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
    if (res != OK) {
        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
                res);
        return res;
    }

    // lockAsync takes ownership of fenceFd and waits on it before CPU access.
    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
    if (res != OK) {
        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
        ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
                gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return BAD_VALUE;
    }

    // Fill in the input descriptor for the external depth photo encoder.
    DepthPhotoInputFrame depthPhoto;
    depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
    depthPhoto.mMainJpegWidth = mBlobWidth;
    depthPhoto.mMainJpegHeight = mBlobHeight;
    depthPhoto.mMainJpegSize = jpegSize;
    depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
    depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
    depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
    depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
    depthPhoto.mJpegQuality = jpegQuality;
    depthPhoto.mIsLogical = mIsLogicalCamera;
    depthPhoto.mMaxJpegSize = maxDepthJpegSize;
    // The camera intrinsic calibration layout is as follows:
    // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
    if (mInstrinsicCalibration.size() == 5) {
        memcpy(depthPhoto.mInstrinsicCalibration, mInstrinsicCalibration.data(),
                sizeof(depthPhoto.mInstrinsicCalibration));
        depthPhoto.mIsInstrinsicCalibrationValid = 1;
    } else {
        depthPhoto.mIsInstrinsicCalibrationValid = 0;
    }
    // The camera lens distortion contains the following lens correction coefficients.
    // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
    if (mLensDistortion.size() == 5) {
        memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
                sizeof(depthPhoto.mLensDistortion));
        depthPhoto.mIsLensDistortionValid = 1;
    } else {
        depthPhoto.mIsLensDistortionValid = 0;
    }

    // Invoke the dlsym()-resolved encoder entry point.
    size_t actualJpegSize = 0;
    res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
    if (res != 0) {
        ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return res;
    }

    // The jpeg blob header must fit after the encoded data; it is written at
    // the very end of the buffer per the camera3 blob transport convention.
    size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
    if (finalJpegSize > finalJpegBufferSize) {
        ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
        return NO_MEMORY;
    }

    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
            (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
    struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
    blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
    blob->jpeg_size = actualJpegSize;
    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);

    return res;
}
368
369void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
370 if (inputFrame == nullptr) {
371 return;
372 }
373
374 if (inputFrame->depthBuffer.data != nullptr) {
375 mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
376 inputFrame->depthBuffer.data = nullptr;
377 mDepthBufferAcquired = false;
378 }
379
380 if (inputFrame->jpegBuffer.data != nullptr) {
381 mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
382 inputFrame->jpegBuffer.data = nullptr;
383 mBlobBufferAcquired = false;
384 }
385
386 if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
387 notifyError(inputFrame->frameNumber);
388 inputFrame->errorNotified = true;
389 }
390}
391
392void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
393 auto it = mPendingInputFrames.begin();
394 while (it != mPendingInputFrames.end()) {
395 if (it->first <= currentTs) {
396 releaseInputFrameLocked(&it->second);
397 it = mPendingInputFrames.erase(it);
398 } else {
399 it++;
400 }
401 }
402}
403
404bool DepthCompositeStream::threadLoop() {
405 int64_t currentTs = INT64_MAX;
406 bool newInputAvailable = false;
407
408 {
409 Mutex::Autolock l(mMutex);
410
411 if (mErrorState) {
412 // In case we landed in error state, return any pending buffers and
413 // halt all further processing.
414 compilePendingInputLocked();
415 releaseInputFramesLocked(currentTs);
416 return false;
417 }
418
419 while (!newInputAvailable) {
420 compilePendingInputLocked();
421 newInputAvailable = getNextReadyInputLocked(&currentTs);
422 if (!newInputAvailable) {
423 auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
424 if (failingFrameNumber >= 0) {
425 // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
426 // possible for two internal stream buffers to fail. In such scenario the
427 // composite stream should notify the client about a stream buffer error only
428 // once and this information is kept within 'errorNotified'.
429 // Any present failed input frames will be removed on a subsequent call to
430 // 'releaseInputFramesLocked()'.
431 releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
432 currentTs = INT64_MAX;
433 }
434
435 auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
436 if (ret == TIMED_OUT) {
437 return true;
438 } else if (ret != OK) {
439 ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
440 strerror(-ret), ret);
441 return false;
442 }
443 }
444 }
445 }
446
447 auto res = processInputFrame(mPendingInputFrames[currentTs]);
448 Mutex::Autolock l(mMutex);
449 if (res != OK) {
450 ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
451 currentTs, strerror(-res), res);
452 mPendingInputFrames[currentTs].error = true;
453 }
454
455 releaseInputFramesLocked(currentTs);
456
457 return true;
458}
459
460bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
461 ANativeWindow *anw = surface.get();
462 status_t err;
463 int format;
464 if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
465 String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
466 err);
467 ALOGE("%s: %s", __FUNCTION__, msg.string());
468 return false;
469 }
470
471 int dataspace;
472 if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
473 String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
474 err);
475 ALOGE("%s: %s", __FUNCTION__, msg.string());
476 return false;
477 }
478
479 if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
480 return true;
481 }
482
483 return false;
484}
485
/*
 * Creates the two internal streams backing the composite: a blob (jpeg)
 * stream at the client-requested size and a depth map stream at the closest
 * supported depth size. Also registers this object as listener on both
 * streams and records the client's output surface.
 *
 * '*id' / '*surfaceIds' receive the blob stream's identifiers, which the
 * client observes as the composite stream. Returns OK on success.
 */
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick a depth size matching the blob aspect ratio (see
    // getMatchingDepthSize()).
    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Internal blob stream: CPU consumer limited to one locked buffer at a
    // time; compilePendingInputLocked() relies on this limit.
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        // The client-provided surface receives the final composed photo.
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    // Internal depth map stream, same consumer setup.
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Listen for buffer/result events on both internal streams.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
557
/*
 * Final configuration step: verifies the depth photo encoder library is
 * loaded, connects and configures the client output surface (BLOB format,
 * combined producer+consumer buffer count) and starts the processing
 * thread. Idempotent once the thread is running.
 */
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    // Without the external encoder (loaded in the constructor via dlopen)
    // no depth photos can be produced, so reject the configuration.
    if ((mDepthPhotoLibHandle == nullptr) || (mDepthPhotoProcess == nullptr)) {
        ALOGE("%s: Depth photo library is not present!", __FUNCTION__);
        return NO_INIT;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    // Size the output queue as producer-side plus consumer-side minimum
    // undequeued buffer counts so neither side starves.
    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
            &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
            anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    // Kick off the frame matching/processing thread (threadLoop()).
    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
615
616status_t DepthCompositeStream::deleteInternalStreams() {
617 // The 'CameraDeviceClient' parent will delete the blob stream
618 requestExit();
619
620 auto ret = join();
621 if (ret != OK) {
622 ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
623 strerror(-ret), ret);
624 }
625
626 sp<CameraDeviceBase> device = mDevice.promote();
627 if (!device.get()) {
628 ALOGE("%s: Invalid camera device!", __FUNCTION__);
629 return NO_INIT;
630 }
631
632 if (mDepthStreamId >= 0) {
633 ret = device->deleteStream(mDepthStreamId);
634 mDepthStreamId = -1;
635 }
636
Shuzhen Wang2c545042019-02-07 10:27:35 -0800637 if (mOutputSurface != nullptr) {
638 mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
639 mOutputSurface.clear();
640 }
641
Emilian Peev538c90e2018-12-17 18:03:19 +0000642 return ret;
643}
644
645void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
646 if (item.mDataSpace == kJpegDataSpace) {
647 ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
648 __func__, ns2ms(item.mTimestamp));
649
650 Mutex::Autolock l(mMutex);
651 if (!mErrorState) {
652 mInputJpegBuffers.push_back(item.mTimestamp);
653 mInputReadyCondition.signal();
654 }
655 } else if (item.mDataSpace == kDepthMapDataSpace) {
656 ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
657 ns2ms(item.mTimestamp));
658
659 Mutex::Autolock l(mMutex);
660 if (!mErrorState) {
661 mInputDepthBuffers.push_back(item.mTimestamp);
662 mInputReadyCondition.signal();
663 }
664 } else {
665 ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
666 }
667}
668
669status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
670 Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
671 if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
672 (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
673 outputStreamIds->push_back(mDepthStreamId);
674 }
675 (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
676
677 if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
678 (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
679 outputStreamIds->push_back(mBlobStreamId);
680 }
681 (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
682
683 if (currentStreamId != nullptr) {
684 *currentStreamId = mBlobStreamId;
685 }
686
687 return NO_ERROR;
688}
689
690void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
691 // Processing can continue even in case of result errors.
692 // At the moment depth composite stream processing relies mainly on static camera
693 // characteristics data. The actual result data can be used for the jpeg quality but
694 // in case it is absent we can default to maximum.
695 eraseResult(resultExtras.frameNumber);
696}
697
698bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
699 bool ret = false;
700 // Buffer errors concerning internal composite streams should not be directly visible to
701 // camera clients. They must only receive a single buffer error with the public composite
702 // stream id.
703 if ((resultExtras.errorStreamId == mDepthStreamId) ||
704 (resultExtras.errorStreamId == mBlobStreamId)) {
705 flagAnErrorFrameNumber(resultExtras.frameNumber);
706 ret = true;
707 }
708
709 return ret;
710}
711
712status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
713 const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
714 size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
715 if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
716 return BAD_VALUE;
717 }
718
719 float arTol = CameraProviderManager::kDepthARTolerance;
720 *depthWidth = *depthHeight = 0;
721
722 float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
723 for (const auto& it : supporedDepthSizes) {
724 auto currentWidth = std::get<0>(it);
725 auto currentHeight = std::get<1>(it);
726 if ((currentWidth == width) && (currentHeight == height)) {
727 *depthWidth = width;
728 *depthHeight = height;
729 break;
730 } else {
731 float currentRatio = static_cast<float> (currentWidth) /
732 static_cast<float> (currentHeight);
733 auto currentSize = currentWidth * currentHeight;
734 auto oldSize = (*depthWidth) * (*depthHeight);
735 if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
736 *depthWidth = currentWidth;
737 *depthHeight = currentHeight;
738 }
739 }
740 }
741
742 return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
743}
744
745void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
746 std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
747 if (depthSizes == nullptr) {
748 return;
749 }
750
751 auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
752 if (entry.count > 0) {
753 // Depth stream dimensions have four int32_t components
754 // (pixelformat, width, height, type)
755 size_t entryCount = entry.count / 4;
756 depthSizes->reserve(entryCount);
757 for (size_t i = 0; i < entry.count; i += 4) {
758 if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
759 (entry.data.i32[i+3] ==
760 ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
761 depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
762 entry.data.i32[i+2]));
763 }
764 }
765 }
766}
767
768status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
769 const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
770 if (compositeOutput == nullptr) {
771 return BAD_VALUE;
772 }
773
774 std::vector<std::tuple<size_t, size_t>> depthSizes;
775 getSupportedDepthSizes(ch, &depthSizes);
776 if (depthSizes.empty()) {
777 ALOGE("%s: No depth stream configurations present", __FUNCTION__);
778 return BAD_VALUE;
779 }
780
781 size_t depthWidth, depthHeight;
782 auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
783 &depthHeight);
784 if (ret != OK) {
785 ALOGE("%s: No matching depth stream size found", __FUNCTION__);
786 return ret;
787 }
788
789 compositeOutput->clear();
790 compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
791
792 // Jpeg/Blob stream info
793 (*compositeOutput)[0].dataSpace = kJpegDataSpace;
794 (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
795
796 // Depth stream info
797 (*compositeOutput)[1].width = depthWidth;
798 (*compositeOutput)[1].height = depthHeight;
799 (*compositeOutput)[1].format = kDepthMapPixelFormat;
800 (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
801 (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
802
803 return NO_ERROR;
804}
805
Emilian Peev538c90e2018-12-17 18:03:19 +0000806}; // namespace camera3
807}; // namespace android