cameraserver: Add HAL plumbing + capture request verification for quad bayer sensor APIs.
- Verify that for 'high resolution' sensors, capture requests have
sensor pixel modes which are consistent with what their output targets
were configured with.
- Add support for
@3.7::ICameraDevice::isSessionConfigurationSupported_3_7
@3.7::ICameraDevice::configureStreams_3_7
@2.7::ICameraProvider::isConcurrentSessionConfigurationSupported_2_7
- For ZoomRatio(Distortion)Mapper, use MAXIMUM_RESOLUTION variants of SENSOR_INFO*
and LENS_CALIBRATION / LENS_DISTORTION while doing coordinate calculations.
Bug: 152813564
Test: Camera CTS
Test: Camera binder tests
Change-Id: I41a86a55e619b25e17e701955ba8c345013329b9
Signed-off-by: Jayant Chowdhary <jchowdhary@google.com>
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 8cccbb1..1b65d1a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -125,8 +125,8 @@
/*listener*/this,
/*sendPartials*/true);
- auto deviceInfo = mDevice->info();
- camera_metadata_entry_t physicalKeysEntry = deviceInfo.find(
+ const CameraMetadata &deviceInfo = mDevice->info();
+ camera_metadata_ro_entry_t physicalKeysEntry = deviceInfo.find(
ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS);
if (physicalKeysEntry.count > 0) {
mSupportedPhysicalRequestKeys.insert(mSupportedPhysicalRequestKeys.begin(),
@@ -135,6 +135,17 @@
}
mProviderManager = providerPtr;
+ // Cache physical camera ids corresponding to this device and also the high
+ // resolution sensors in this device + physical camera ids
+ mProviderManager->isLogicalCamera(mCameraIdStr.string(), &mPhysicalCameraIds);
+ if (isUltraHighResolutionSensor(mCameraIdStr)) {
+ mHighResolutionSensors.insert(mCameraIdStr.string());
+ }
+ for (auto &physicalId : mPhysicalCameraIds) {
+ if (isUltraHighResolutionSensor(String8(physicalId.c_str()))) {
+ mHighResolutionSensors.insert(physicalId.c_str());
+ }
+ }
return OK;
}
@@ -186,6 +197,17 @@
return binder::Status::ok();
}
+static std::list<int> getIntersection(const std::unordered_set<int> &streamIdsForThisCamera,
+ const Vector<int> &streamIdsForThisRequest) {
+ std::list<int> intersection;
+ for (auto &streamId : streamIdsForThisRequest) {
+ if (streamIdsForThisCamera.find(streamId) != streamIdsForThisCamera.end()) {
+ intersection.emplace_back(streamId);
+ }
+ }
+ return intersection;
+}
+
binder::Status CameraDeviceClient::submitRequestList(
const std::vector<hardware::camera2::CaptureRequest>& requests,
bool streaming,
@@ -332,6 +354,24 @@
"Request settings are empty");
}
+ // Check whether the physical / logical stream has settings
+ // consistent with the sensor pixel mode(s) it was configured with.
+ // mHighResolutionCameraIdToStreamIdSet will only have ids that are high resolution
+ const auto streamIdSetIt = mHighResolutionCameraIdToStreamIdSet.find(it.id);
+ if (streamIdSetIt != mHighResolutionCameraIdToStreamIdSet.end()) {
+ std::list<int> streamIdsUsedInRequest = getIntersection(streamIdSetIt->second,
+ outputStreamIds);
+ if (!request.mIsReprocess &&
+ !isSensorPixelModeConsistent(streamIdsUsedInRequest, it.settings)) {
+ ALOGE("%s: Camera %s: Request settings CONTROL_SENSOR_PIXEL_MODE not "
+ "consistent with configured streams. Rejecting request.",
+ __FUNCTION__, it.id.c_str());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request settings CONTROL_SENSOR_PIXEL_MODE are not consistent with "
+ "streams configured");
+ }
+ }
+
String8 physicalId(it.id.c_str());
if (physicalId != mDevice->getId()) {
auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
@@ -494,7 +534,7 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ res = camera3::SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
mCameraIdStr);
if (!res.isOk()) {
return res;
@@ -560,8 +600,8 @@
binder::Status CameraDeviceClient::isSessionConfigurationSupported(
const SessionConfiguration& sessionConfiguration, bool *status /*out*/) {
- ATRACE_CALL();
+ ATRACE_CALL();
binder::Status res;
status_t ret = OK;
if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -573,7 +613,7 @@
}
auto operatingMode = sessionConfiguration.getOperatingMode();
- res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ res = camera3::SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
mCameraIdStr);
if (!res.isOk()) {
return res;
@@ -589,7 +629,7 @@
metadataGetter getMetadata = [this](const String8 &id) {return mDevice->infoPhysical(id);};
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
+ res = camera3::SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
mCameraIdStr, mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration,
&earlyExit);
if (!res.isOk()) {
@@ -714,6 +754,13 @@
}
mCompositeStreamMap.removeItemsAt(compositeIndex);
}
+ for (auto &mapIt: mHighResolutionCameraIdToStreamIdSet) {
+ auto &streamSet = mapIt.second;
+ if (streamSet.find(streamId) != streamSet.end()) {
+ streamSet.erase(streamId);
+ break;
+ }
+ }
}
}
@@ -740,7 +787,7 @@
bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
bool isMultiResolution = outputConfiguration.isMultiResolution();
- res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
+ res = camera3::SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
outputConfiguration.getSurfaceType());
if (!res.isOk()) {
return res;
@@ -749,10 +796,8 @@
if (!mDevice.get()) {
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- std::vector<std::string> physicalCameraIds;
- mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = SessionConfigurationUtils::checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
- mCameraIdStr);
+ res = camera3::SessionConfigurationUtils::checkPhysicalCameraId(mPhysicalCameraIds,
+ physicalCameraId, mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -768,6 +813,8 @@
OutputStreamInfo streamInfo;
bool isStreamInfoValid = false;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (auto& bufferProducer : bufferProducers) {
// Don't create multiple streams for the same target surface
sp<IBinder> binder = IInterface::asBinder(bufferProducer);
@@ -780,8 +827,9 @@
}
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo, isStreamInfoValid,
- surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(streamInfo,
+ isStreamInfoValid, surface, bufferProducer, mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -793,10 +841,10 @@
binders.push_back(IInterface::asBinder(bufferProducer));
surfaces.push_back(surface);
}
-
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<int> surfaceIds;
- bool isDepthCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
+ bool isDepthCompositeStream =
+ camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
bool isHeicCompisiteStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
if (isDepthCompositeStream || isHeicCompisiteStream) {
sp<CompositeStream> compositeStream;
@@ -809,8 +857,8 @@
err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared, isMultiResolution);
+ &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
+ outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
if (err == OK) {
mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
compositeStream);
@@ -819,8 +867,8 @@
err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format, streamInfo.dataSpace,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared, isMultiResolution);
+ &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
+ outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
}
if (err != OK) {
@@ -848,6 +896,16 @@
// Set transform flags to ensure preview to be rotated correctly.
res = setStreamTransformLocked(streamId);
+ // Fill in mHighResolutionCameraIdToStreamIdSet map
+ const String8 &cameraIdUsed =
+ physicalCameraId.size() != 0 ? physicalCameraId : mCameraIdStr;
+ const char *cameraIdUsedCStr = cameraIdUsed.string();
+ // Only needed for high resolution sensors
+ if (mHighResolutionSensors.find(cameraIdUsedCStr) !=
+ mHighResolutionSensors.end()) {
+ mHighResolutionCameraIdToStreamIdSet[cameraIdUsedCStr].insert(streamId);
+ }
+
*newStreamId = streamId;
}
@@ -884,10 +942,25 @@
std::vector<sp<Surface>> noSurface;
std::vector<int> surfaceIds;
String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+ const String8 &cameraIdUsed =
+ physicalCameraId.size() != 0 ? physicalCameraId : mCameraIdStr;
+ // Here, we override sensor pixel modes
+ std::unordered_set<int32_t> overriddenSensorPixelModesUsed;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
+ if (camera3::SessionConfigurationUtils::checkAndOverrideSensorPixelModesUsed(
+ sensorPixelModesUsed, format, width, height, getStaticInfo(cameraIdUsed),
+ /*allowRounding*/ false, &overriddenSensorPixelModesUsed) != OK) {
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "sensor pixel modes used not valid for deferred stream");
+ }
+
err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
height, format, dataSpace,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds,
+ &streamId, physicalCameraId,
+ overriddenSensorPixelModesUsed,
+ &surfaceIds,
outputConfiguration.getSurfaceSetID(), isShared,
outputConfiguration.isMultiResolution(), consumerUsage);
@@ -900,9 +973,9 @@
// a separate list to track. Once the deferred surface is set, this id will be
// relocated to mStreamMap.
mDeferredStreams.push_back(streamId);
-
mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
- std::forward_as_tuple(width, height, format, dataSpace, consumerUsage));
+ std::forward_as_tuple(width, height, format, dataSpace, consumerUsage,
+ overriddenSensorPixelModesUsed));
ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
" (%d x %d) stream with format 0x%x.",
@@ -912,6 +985,13 @@
res = setStreamTransformLocked(streamId);
*newStreamId = streamId;
+ // Fill in mHighResolutionCameraIdToStreamIdSet
+ const char *cameraIdUsedCStr = cameraIdUsed.string();
+ // Only needed for high resolution sensors
+ if (mHighResolutionSensors.find(cameraIdUsedCStr) !=
+ mHighResolutionSensors.end()) {
+ mHighResolutionCameraIdToStreamIdSet[cameraIdUsed.string()].insert(streamId);
+ }
}
return res;
}
@@ -1081,13 +1161,15 @@
newOutputsMap.removeItemsAt(idx);
}
}
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (size_t i = 0; i < newOutputsMap.size(); i++) {
OutputStreamInfo outInfo;
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false,
- surface, newOutputsMap.valueAt(i), mCameraIdStr,
- mDevice->infoPhysical(physicalCameraId));
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(outInfo,
+ /*isStreamInfoValid*/ false, surface, newOutputsMap.valueAt(i), mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -1442,6 +1524,8 @@
}
std::vector<sp<Surface>> consumerSurfaces;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (auto& bufferProducer : bufferProducers) {
// Don't create multiple streams for the same target surface
ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
@@ -1452,9 +1536,9 @@
}
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
- mDevice->infoPhysical(physicalId));
+ mDevice->infoPhysical(physicalId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -1936,4 +2020,54 @@
return ret;
}
+
+const CameraMetadata &CameraDeviceClient::getStaticInfo(const String8 &cameraId) {
+ if (mDevice->getId() == cameraId) {
+ return mDevice->info();
+ }
+ return mDevice->infoPhysical(cameraId);
+}
+
+bool CameraDeviceClient::isUltraHighResolutionSensor(const String8 &cameraId) {
+ const CameraMetadata &deviceInfo = getStaticInfo(cameraId);
+ return camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(deviceInfo);
+}
+
+bool CameraDeviceClient::isSensorPixelModeConsistent(
+ const std::list<int> &streamIdList, const CameraMetadata &settings) {
+ // First we get the sensorPixelMode from the settings metadata.
+ int32_t sensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+ camera_metadata_ro_entry sensorPixelModeEntry = settings.find(ANDROID_SENSOR_PIXEL_MODE);
+ if (sensorPixelModeEntry.count != 0) {
+ sensorPixelMode = sensorPixelModeEntry.data.u8[0];
+ if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
+ sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
+ ALOGE("%s: Request sensor pixel mode not is not one of the valid values %d",
+ __FUNCTION__, sensorPixelMode);
+ return false;
+ }
+ }
+ // Check whether each stream has max resolution allowed.
+ bool consistent = true;
+ for (auto it : streamIdList) {
+ auto const streamInfoIt = mStreamInfoMap.find(it);
+ if (streamInfoIt == mStreamInfoMap.end()) {
+ ALOGE("%s: stream id %d not created, skipping", __FUNCTION__, it);
+ return false;
+ }
+ consistent =
+ streamInfoIt->second.sensorPixelModesUsed.find(sensorPixelMode) !=
+ streamInfoIt->second.sensorPixelModesUsed.end();
+ if (!consistent) {
+ ALOGE("sensorPixelMode used %i not consistent with configured modes", sensorPixelMode);
+ for (auto m : streamInfoIt->second.sensorPixelModesUsed) {
+ ALOGE("sensor pixel mode used list: %i", m);
+ }
+ break;
+ }
+ }
+
+ return consistent;
+}
+
} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 9f7a4af..adedf92 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -28,6 +28,7 @@
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
#include "CompositeStream.h"
+#include "utils/SessionConfigurationUtils.h"
using android::camera3::OutputStreamInfo;
using android::camera3::CompositeStream;
@@ -222,6 +223,13 @@
// Calculate the ANativeWindow transform from android.sensor.orientation
status_t getRotationTransformLocked(/*out*/int32_t* transform);
+ bool isUltraHighResolutionSensor(const String8 &cameraId);
+
+ bool isSensorPixelModeConsistent(const std::list<int> &streamIdList,
+ const CameraMetadata &settings);
+
+ const CameraMetadata &getStaticInfo(const String8 &cameraId);
+
private:
// StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
// streamId specifies the index of the stream the surface belongs to, and the
@@ -305,6 +313,8 @@
int32_t mRequestIdCounter;
+ std::vector<std::string> mPhysicalCameraIds;
+
// The list of output streams whose surfaces are deferred. We have to track them separately
// as there are no surfaces available and can not be put into mStreamMap. Once the deferred
// Surface is configured, the stream id will be moved to mStreamMap.
@@ -313,6 +323,12 @@
// stream ID -> outputStreamInfo mapping
std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+ // map high resolution camera id (logical / physical) -> list of stream ids configured
+ std::unordered_map<std::string, std::unordered_set<int>> mHighResolutionCameraIdToStreamIdSet;
+
+ // set of high resolution camera id (logical / physical)
+ std::unordered_set<std::string> mHighResolutionSensors;
+
KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
sp<CameraProviderManager> mProviderManager;
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
index 515b7f2..4b840fc 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -47,7 +47,9 @@
status_t CompositeStream::createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
- std::vector<int> * surfaceIds, int streamSetId, bool isShared, bool isMultiResolution) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> * surfaceIds,
+ int streamSetId, bool isShared, bool isMultiResolution) {
if (hasDeferredConsumer) {
ALOGE("%s: Deferred consumers not supported in case of composite streams!",
__FUNCTION__);
@@ -72,8 +74,8 @@
return BAD_VALUE;
}
- return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation, id,
- physicalCameraId, surfaceIds, streamSetId, isShared);
+ return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation,
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds, streamSetId, isShared);
}
status_t CompositeStream::deleteStream() {
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 1bf137a..600bd28 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -44,7 +44,9 @@
status_t createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution);
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared, bool isMultiResolution);
status_t deleteStream();
@@ -55,7 +57,9 @@
virtual status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) = 0;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) = 0;
// Release all internal streams and corresponding resources.
virtual status_t deleteInternalStreams() = 0;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 2c553f3..19b54e0 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -20,6 +20,7 @@
#include "api1/client2/JpegProcessor.h"
#include "common/CameraProviderManager.h"
+#include "utils/SessionConfigurationUtils.h"
#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -78,7 +79,10 @@
}
}
- getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
+ getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
+ if (SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo)) {
+ getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
+ }
}
}
@@ -484,17 +488,82 @@
return false;
}
// Returns whether |value| is present in |containerSet|.
// Takes the set by const reference: the original signature took it by value,
// copying the whole unordered_set (allocations + rehash) on every call.
static bool setContains(const std::unordered_set<int32_t> &containerSet, int32_t value) {
    return containerSet.find(value) != containerSet.end();
}
+
+status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizes,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ size_t *depthWidth, size_t *depthHeight) {
+ if (depthWidth == nullptr || depthHeight == nullptr) {
+ return BAD_VALUE;
+ }
+ size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
+ bool hasDefaultSensorPixelMode =
+ setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
+
+ bool hasMaximumResolutionSensorPixelMode =
+ setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
+
+ if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
+ ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (hasDefaultSensorPixelMode) {
+ auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
+ &chosenDepthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ return ret;
+ }
+ }
+
+ if (hasMaximumResolutionSensorPixelMode) {
+ size_t depthWidth = 0, depthHeight = 0;
+ auto ret = getMatchingDepthSize(width, height,
+ depthSizesMaximumResolution, &depthWidth, &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
+ return ret;
+ }
+ // Both matching depth sizes should be the same.
+ if (chosenDepthWidth != 0 && chosenDepthWidth != depthWidth &&
+ chosenDepthHeight != depthHeight) {
+ ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
+ " have matching depth sizes", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (chosenDepthWidth == 0) {
+ chosenDepthWidth = depthWidth;
+ chosenDepthHeight = depthHeight;
+ }
+ }
+ *depthWidth = chosenDepthWidth;
+ *depthHeight = chosenDepthHeight;
+ return OK;
+}
+
+
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int /*streamSetId*/, bool /*isShared*/) {
if (mSupportedDepthSizes.empty()) {
ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
return INVALID_OPERATION;
}
size_t depthWidth, depthHeight;
- auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
+ auto ret =
+ checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
+ mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
+ &depthHeight);
if (ret != OK) {
ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
return ret;
@@ -515,7 +584,7 @@
mBlobSurface = new Surface(producer);
ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
- id, physicalCameraId, surfaceIds);
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
if (ret == OK) {
mBlobStreamId = *id;
mBlobSurfaceId = (*surfaceIds)[0];
@@ -531,7 +600,8 @@
mDepthSurface = new Surface(producer);
std::vector<int> depthSurfaceId;
ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
- kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
+ kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
+ &depthSurfaceId);
if (ret == OK) {
mDepthSurfaceId = depthSurfaceId[0];
} else {
@@ -749,13 +819,15 @@
return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
}
-void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
+void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
if (depthSizes == nullptr) {
return;
}
- auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ auto entry = ch.find(
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
if (entry.count > 0) {
// Depth stream dimensions have four int32_t components
// (pixelformat, width, height, type)
@@ -779,30 +851,43 @@
}
std::vector<std::tuple<size_t, size_t>> depthSizes;
- getSupportedDepthSizes(ch, &depthSizes);
+ std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
+ getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
if (depthSizes.empty()) {
ALOGE("%s: No depth stream configurations present", __FUNCTION__);
return BAD_VALUE;
}
- size_t depthWidth, depthHeight;
- auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
- &depthHeight);
+ if (SessionConfigurationUtils::isUltraHighResolutionSensor(ch)) {
+ getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
+ if (depthSizesMaximumResolution.empty()) {
+ ALOGE("%s: No depth stream configurations for maximum resolution present",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+ }
+
+ size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
+ auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
+ depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
+ &chosenDepthHeight);
+
if (ret != OK) {
- ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
return ret;
}
compositeOutput->clear();
compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+ // Sensor pixel modes should stay the same here. They're already overridden.
// Jpeg/Blob stream info
(*compositeOutput)[0].dataSpace = kJpegDataSpace;
(*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
// Depth stream info
- (*compositeOutput)[1].width = depthWidth;
- (*compositeOutput)[1].height = depthHeight;
+ (*compositeOutput)[1].width = chosenDepthWidth;
+ (*compositeOutput)[1].height = chosenDepthHeight;
(*compositeOutput)[1].format = kDepthMapPixelFormat;
(*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
(*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index 05bc504..a520bbf 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -51,7 +51,9 @@
status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) override;
status_t deleteInternalStreams() override;
status_t configureStream() override;
status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
@@ -86,11 +88,17 @@
};
// Helper methods
- static void getSupportedDepthSizes(const CameraMetadata& ch,
+ static void getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/);
static status_t getMatchingDepthSize(size_t width, size_t height,
const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+ static status_t checkAndGetMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizes,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+
// Dynamic depth processing
status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
@@ -126,6 +134,7 @@
ssize_t mMaxJpegSize;
std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
+ std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizesMaximumResolution;
std::vector<float> mIntrinsicCalibration, mLensDistortion;
bool mIsLogicalCamera;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 7d68485..582001d 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -36,6 +36,7 @@
#include "common/CameraDeviceBase.h"
#include "utils/ExifUtils.h"
+#include "utils/SessionConfigurationUtils.h"
#include "HeicEncoderInfoManager.h"
#include "HeicCompositeStream.h"
@@ -115,7 +116,9 @@
status_t HeicCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int /*streamSetId*/, bool /*isShared*/) {
sp<CameraDeviceBase> device = mDevice.promote();
if (!device.get()) {
@@ -141,7 +144,8 @@
mStaticInfo = device->info();
res = device->createStream(mAppSegmentSurface, mAppSegmentMaxSize, 1, format,
- kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId, surfaceIds);
+ kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId,
+ sensorPixelModesUsed,surfaceIds);
if (res == OK) {
mAppSegmentSurfaceId = (*surfaceIds)[0];
} else {
@@ -177,7 +181,7 @@
int srcStreamFmt = mUseGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
res = device->createStream(mMainImageSurface, width, height, srcStreamFmt, kHeifDataSpace,
- rotation, id, physicalCameraId, &sourceSurfaceId);
+ rotation, id, physicalCameraId, sensorPixelModesUsed, &sourceSurfaceId);
if (res == OK) {
mMainImageSurfaceId = sourceSurfaceId[0];
mMainImageStreamId = *id;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index cbd9d21..1077a1f 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -46,7 +46,9 @@
status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) override;
status_t deleteInternalStreams() override;