cameraserver: Add HAL plumbing + capture request verification for quad bayer sensor apis.
- Verify that for 'high resolution' sensors, capture requests have
sensor pixel modes which are consistent with what their output targets
were configured with.
- Add support for
@3.7::ICameraDevice::isSessionConfigurationSupported_3_7
@3.7::ICameraDevice::configureStreams_3_7
@2.7::ICameraProvider::isConcurrentSessionConfigurationSupported_2_7
- For ZoomRatio(Distortion)Mapper, use MAXIMUM_RESOLUTION variants of SENSOR_INFO*
and LENS_CALIBRATION / LENS_DISTORTION while doing coordinate calculations.
Bug: 152813564
Test: Camera CTS
Test: Camera binder tests
Change-Id: I41a86a55e619b25e17e701955ba8c345013329b9
Signed-off-by: Jayant Chowdhary <jchowdhary@google.com>
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 4c3ded6..ee764ec 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -158,7 +158,7 @@
res = device->createStream(mCallbackWindow,
params.previewWidth, params.previewHeight, callbackFormat,
HAL_DATASPACE_V0_JFIF, CAMERA_STREAM_ROTATION_0, &mCallbackStreamId,
- String8());
+ String8(), std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT});
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index ff2e398..eed2654 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -151,7 +151,7 @@
params.pictureWidth, params.pictureHeight,
HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_V0_JFIF,
CAMERA_STREAM_ROTATION_0, &mCaptureStreamId,
- String8());
+ String8(), std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT});
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for capture: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 3a709c9..02ac638 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -56,7 +56,7 @@
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
int pictureWidth, pictureHeight;
- // Store the picture size before they are overriden by video snapshot
+ // Store the picture size before they are overridden by video snapshot
int pictureWidthLastSet, pictureHeightLastSet;
bool pictureSizeOverriden;
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 8b1eb28..2d3597c 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -198,7 +198,8 @@
res = device->createStream(mPreviewWindow,
params.previewWidth, params.previewHeight,
CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
- CAMERA_STREAM_ROTATION_0, &mPreviewStreamId, String8());
+ CAMERA_STREAM_ROTATION_0, &mPreviewStreamId, String8(),
+ std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT});
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
@@ -384,7 +385,7 @@
params.videoWidth, params.videoHeight,
params.videoFormat, params.videoDataSpace,
CAMERA_STREAM_ROTATION_0, &mRecordingStreamId,
- String8());
+ String8(), std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT});
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for recording: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 9fdc727..8e598f1 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -261,7 +261,7 @@
res = device->createStream(outSurface, params.fastInfo.maxZslSize.width,
params.fastInfo.maxZslSize.height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
HAL_DATASPACE_UNKNOWN, CAMERA_STREAM_ROTATION_0, &mZslStreamId,
- String8());
+ String8(), std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT});
if (res != OK) {
ALOGE("%s: Camera %d: Can't create ZSL stream: "
"%s (%d)", __FUNCTION__, client->getCameraId(),
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 8cccbb1..1b65d1a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -125,8 +125,8 @@
/*listener*/this,
/*sendPartials*/true);
- auto deviceInfo = mDevice->info();
- camera_metadata_entry_t physicalKeysEntry = deviceInfo.find(
+ const CameraMetadata &deviceInfo = mDevice->info();
+ camera_metadata_ro_entry_t physicalKeysEntry = deviceInfo.find(
ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS);
if (physicalKeysEntry.count > 0) {
mSupportedPhysicalRequestKeys.insert(mSupportedPhysicalRequestKeys.begin(),
@@ -135,6 +135,17 @@
}
mProviderManager = providerPtr;
+ // Cache physical camera ids corresponding to this device and also the high
+ // resolution sensors in this device + physical camera ids
+ mProviderManager->isLogicalCamera(mCameraIdStr.string(), &mPhysicalCameraIds);
+ if (isUltraHighResolutionSensor(mCameraIdStr)) {
+ mHighResolutionSensors.insert(mCameraIdStr.string());
+ }
+ for (auto &physicalId : mPhysicalCameraIds) {
+ if (isUltraHighResolutionSensor(String8(physicalId.c_str()))) {
+ mHighResolutionSensors.insert(physicalId.c_str());
+ }
+ }
return OK;
}
@@ -186,6 +197,17 @@
return binder::Status::ok();
}
+static std::list<int> getIntersection(const std::unordered_set<int> &streamIdsForThisCamera,
+ const Vector<int> &streamIdsForThisRequest) {
+ std::list<int> intersection;
+ for (auto &streamId : streamIdsForThisRequest) {
+ if (streamIdsForThisCamera.find(streamId) != streamIdsForThisCamera.end()) {
+ intersection.emplace_back(streamId);
+ }
+ }
+ return intersection;
+}
+
binder::Status CameraDeviceClient::submitRequestList(
const std::vector<hardware::camera2::CaptureRequest>& requests,
bool streaming,
@@ -332,6 +354,24 @@
"Request settings are empty");
}
+ // Check whether the physical / logical stream has settings
+ // consistent with the sensor pixel mode(s) it was configured with.
+ // mCameraIdToStreamSet will only have ids that are high resolution
+ const auto streamIdSetIt = mHighResolutionCameraIdToStreamIdSet.find(it.id);
+ if (streamIdSetIt != mHighResolutionCameraIdToStreamIdSet.end()) {
+ std::list<int> streamIdsUsedInRequest = getIntersection(streamIdSetIt->second,
+ outputStreamIds);
+ if (!request.mIsReprocess &&
+ !isSensorPixelModeConsistent(streamIdsUsedInRequest, it.settings)) {
+ ALOGE("%s: Camera %s: Request settings CONTROL_SENSOR_PIXEL_MODE not "
+ "consistent with configured streams. Rejecting request.",
+ __FUNCTION__, it.id.c_str());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request settings CONTROL_SENSOR_PIXEL_MODE are not consistent with "
+ "streams configured");
+ }
+ }
+
String8 physicalId(it.id.c_str());
if (physicalId != mDevice->getId()) {
auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
@@ -494,7 +534,7 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ res = camera3::SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
mCameraIdStr);
if (!res.isOk()) {
return res;
@@ -560,8 +600,8 @@
binder::Status CameraDeviceClient::isSessionConfigurationSupported(
const SessionConfiguration& sessionConfiguration, bool *status /*out*/) {
- ATRACE_CALL();
+ ATRACE_CALL();
binder::Status res;
status_t ret = OK;
if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -573,7 +613,7 @@
}
auto operatingMode = sessionConfiguration.getOperatingMode();
- res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ res = camera3::SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
mCameraIdStr);
if (!res.isOk()) {
return res;
@@ -589,7 +629,7 @@
metadataGetter getMetadata = [this](const String8 &id) {return mDevice->infoPhysical(id);};
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
+ res = camera3::SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
mCameraIdStr, mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration,
&earlyExit);
if (!res.isOk()) {
@@ -714,6 +754,13 @@
}
mCompositeStreamMap.removeItemsAt(compositeIndex);
}
+ for (auto &mapIt: mHighResolutionCameraIdToStreamIdSet) {
+ auto &streamSet = mapIt.second;
+ if (streamSet.find(streamId) != streamSet.end()) {
+ streamSet.erase(streamId);
+ break;
+ }
+ }
}
}
@@ -740,7 +787,7 @@
bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
bool isMultiResolution = outputConfiguration.isMultiResolution();
- res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
+ res = camera3::SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
outputConfiguration.getSurfaceType());
if (!res.isOk()) {
return res;
@@ -749,10 +796,8 @@
if (!mDevice.get()) {
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- std::vector<std::string> physicalCameraIds;
- mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = SessionConfigurationUtils::checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
- mCameraIdStr);
+ res = camera3::SessionConfigurationUtils::checkPhysicalCameraId(mPhysicalCameraIds,
+ physicalCameraId, mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -768,6 +813,8 @@
OutputStreamInfo streamInfo;
bool isStreamInfoValid = false;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (auto& bufferProducer : bufferProducers) {
// Don't create multiple streams for the same target surface
sp<IBinder> binder = IInterface::asBinder(bufferProducer);
@@ -780,8 +827,9 @@
}
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo, isStreamInfoValid,
- surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(streamInfo,
+ isStreamInfoValid, surface, bufferProducer, mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -793,10 +841,10 @@
binders.push_back(IInterface::asBinder(bufferProducer));
surfaces.push_back(surface);
}
-
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<int> surfaceIds;
- bool isDepthCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
+ bool isDepthCompositeStream =
+ camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
bool isHeicCompisiteStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
if (isDepthCompositeStream || isHeicCompisiteStream) {
sp<CompositeStream> compositeStream;
@@ -809,8 +857,8 @@
err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared, isMultiResolution);
+ &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
+ outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
if (err == OK) {
mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
compositeStream);
@@ -819,8 +867,8 @@
err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format, streamInfo.dataSpace,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared, isMultiResolution);
+ &streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
+ outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
}
if (err != OK) {
@@ -848,6 +896,16 @@
// Set transform flags to ensure preview to be rotated correctly.
res = setStreamTransformLocked(streamId);
+ // Fill in mHighResolutionCameraIdToStreamIdSet map
+ const String8 &cameraIdUsed =
+ physicalCameraId.size() != 0 ? physicalCameraId : mCameraIdStr;
+ const char *cameraIdUsedCStr = cameraIdUsed.string();
+ // Only needed for high resolution sensors
+ if (mHighResolutionSensors.find(cameraIdUsedCStr) !=
+ mHighResolutionSensors.end()) {
+ mHighResolutionCameraIdToStreamIdSet[cameraIdUsedCStr].insert(streamId);
+ }
+
*newStreamId = streamId;
}
@@ -884,10 +942,25 @@
std::vector<sp<Surface>> noSurface;
std::vector<int> surfaceIds;
String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
+ const String8 &cameraIdUsed =
+ physicalCameraId.size() != 0 ? physicalCameraId : mCameraIdStr;
+ // Here, we override sensor pixel modes
+ std::unordered_set<int32_t> overriddenSensorPixelModesUsed;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
+ if (camera3::SessionConfigurationUtils::checkAndOverrideSensorPixelModesUsed(
+ sensorPixelModesUsed, format, width, height, getStaticInfo(cameraIdUsed),
+ /*allowRounding*/ false, &overriddenSensorPixelModesUsed) != OK) {
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "sensor pixel modes used not valid for deferred stream");
+ }
+
err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
height, format, dataSpace,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds,
+ &streamId, physicalCameraId,
+ overriddenSensorPixelModesUsed,
+ &surfaceIds,
outputConfiguration.getSurfaceSetID(), isShared,
outputConfiguration.isMultiResolution(), consumerUsage);
@@ -900,9 +973,9 @@
// a separate list to track. Once the deferred surface is set, this id will be
// relocated to mStreamMap.
mDeferredStreams.push_back(streamId);
-
mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
- std::forward_as_tuple(width, height, format, dataSpace, consumerUsage));
+ std::forward_as_tuple(width, height, format, dataSpace, consumerUsage,
+ overriddenSensorPixelModesUsed));
ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
" (%d x %d) stream with format 0x%x.",
@@ -912,6 +985,13 @@
res = setStreamTransformLocked(streamId);
*newStreamId = streamId;
+ // Fill in mHighResolutionCameraIdToStreamIdSet
+ const char *cameraIdUsedCStr = cameraIdUsed.string();
+ // Only needed for high resolution sensors
+ if (mHighResolutionSensors.find(cameraIdUsedCStr) !=
+ mHighResolutionSensors.end()) {
+ mHighResolutionCameraIdToStreamIdSet[cameraIdUsedCStr].insert(streamId);
+ }
}
return res;
}
@@ -1081,13 +1161,15 @@
newOutputsMap.removeItemsAt(idx);
}
}
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (size_t i = 0; i < newOutputsMap.size(); i++) {
OutputStreamInfo outInfo;
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false,
- surface, newOutputsMap.valueAt(i), mCameraIdStr,
- mDevice->infoPhysical(physicalCameraId));
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(outInfo,
+ /*isStreamInfoValid*/ false, surface, newOutputsMap.valueAt(i), mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -1442,6 +1524,8 @@
}
std::vector<sp<Surface>> consumerSurfaces;
+ const std::vector<int32_t> &sensorPixelModesUsed =
+ outputConfiguration.getSensorPixelModesUsed();
for (auto& bufferProducer : bufferProducers) {
// Don't create multiple streams for the same target surface
ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
@@ -1452,9 +1536,9 @@
}
sp<Surface> surface;
- res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
+ res = camera3::SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
- mDevice->infoPhysical(physicalId));
+ mDevice->infoPhysical(physicalId), sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -1936,4 +2020,54 @@
return ret;
}
+
+const CameraMetadata &CameraDeviceClient::getStaticInfo(const String8 &cameraId) {
+ if (mDevice->getId() == cameraId) {
+ return mDevice->info();
+ }
+ return mDevice->infoPhysical(cameraId);
+}
+
+bool CameraDeviceClient::isUltraHighResolutionSensor(const String8 &cameraId) {
+ const CameraMetadata &deviceInfo = getStaticInfo(cameraId);
+ return camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(deviceInfo);
+}
+
+bool CameraDeviceClient::isSensorPixelModeConsistent(
+ const std::list<int> &streamIdList, const CameraMetadata &settings) {
+ // First we get the sensorPixelMode from the settings metadata.
+ int32_t sensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+ camera_metadata_ro_entry sensorPixelModeEntry = settings.find(ANDROID_SENSOR_PIXEL_MODE);
+ if (sensorPixelModeEntry.count != 0) {
+ sensorPixelMode = sensorPixelModeEntry.data.u8[0];
+ if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
+ sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
+ ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
+ __FUNCTION__, sensorPixelMode);
+ return false;
+ }
+ }
+ // Check whether each stream has max resolution allowed.
+ bool consistent = true;
+ for (auto it : streamIdList) {
+ auto const streamInfoIt = mStreamInfoMap.find(it);
+ if (streamInfoIt == mStreamInfoMap.end()) {
+ ALOGE("%s: stream id %d not created, rejecting request", __FUNCTION__, it);
+ return false;
+ }
+ consistent =
+ streamInfoIt->second.sensorPixelModesUsed.find(sensorPixelMode) !=
+ streamInfoIt->second.sensorPixelModesUsed.end();
+ if (!consistent) {
+ ALOGE("sensorPixelMode used %i not consistent with configured modes", sensorPixelMode);
+ for (auto m : streamInfoIt->second.sensorPixelModesUsed) {
+ ALOGE("sensor pixel mode used list: %i", m);
+ }
+ break;
+ }
+ }
+
+ return consistent;
+}
+
} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 9f7a4af..adedf92 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -28,6 +28,7 @@
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
#include "CompositeStream.h"
+#include "utils/SessionConfigurationUtils.h"
using android::camera3::OutputStreamInfo;
using android::camera3::CompositeStream;
@@ -222,6 +223,13 @@
// Calculate the ANativeWindow transform from android.sensor.orientation
status_t getRotationTransformLocked(/*out*/int32_t* transform);
+ bool isUltraHighResolutionSensor(const String8 &cameraId);
+
+ bool isSensorPixelModeConsistent(const std::list<int> &streamIdList,
+ const CameraMetadata &settings);
+
+ const CameraMetadata &getStaticInfo(const String8 &cameraId);
+
private:
// StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
// streamId specifies the index of the stream the surface belongs to, and the
@@ -305,6 +313,8 @@
int32_t mRequestIdCounter;
+ std::vector<std::string> mPhysicalCameraIds;
+
// The list of output streams whose surfaces are deferred. We have to track them separately
// as there are no surfaces available and can not be put into mStreamMap. Once the deferred
// Surface is configured, the stream id will be moved to mStreamMap.
@@ -313,6 +323,12 @@
// stream ID -> outputStreamInfo mapping
std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+ // map high resolution camera id (logical / physical) -> list of stream ids configured
+ std::unordered_map<std::string, std::unordered_set<int>> mHighResolutionCameraIdToStreamIdSet;
+
+ // set of high resolution camera id (logical / physical)
+ std::unordered_set<std::string> mHighResolutionSensors;
+
KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
sp<CameraProviderManager> mProviderManager;
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
index 515b7f2..4b840fc 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -47,7 +47,9 @@
status_t CompositeStream::createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
- std::vector<int> * surfaceIds, int streamSetId, bool isShared, bool isMultiResolution) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> * surfaceIds,
+ int streamSetId, bool isShared, bool isMultiResolution) {
if (hasDeferredConsumer) {
ALOGE("%s: Deferred consumers not supported in case of composite streams!",
__FUNCTION__);
@@ -72,8 +74,8 @@
return BAD_VALUE;
}
- return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation, id,
- physicalCameraId, surfaceIds, streamSetId, isShared);
+ return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation,
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds, streamSetId, isShared);
}
status_t CompositeStream::deleteStream() {
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 1bf137a..600bd28 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -44,7 +44,9 @@
status_t createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution);
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared, bool isMultiResolution);
status_t deleteStream();
@@ -55,7 +57,9 @@
virtual status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) = 0;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) = 0;
// Release all internal streams and corresponding resources.
virtual status_t deleteInternalStreams() = 0;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 2c553f3..19b54e0 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -20,6 +20,7 @@
#include "api1/client2/JpegProcessor.h"
#include "common/CameraProviderManager.h"
+#include "utils/SessionConfigurationUtils.h"
#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -78,7 +79,10 @@
}
}
- getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
+ getSupportedDepthSizes(staticInfo, /*maxResolution*/false, &mSupportedDepthSizes);
+ if (SessionConfigurationUtils::isUltraHighResolutionSensor(staticInfo)) {
+ getSupportedDepthSizes(staticInfo, true, &mSupportedDepthSizesMaximumResolution);
+ }
}
}
@@ -484,17 +488,82 @@
return false;
}
+static bool setContains(const std::unordered_set<int32_t> &containerSet, int32_t value) {
+ return containerSet.find(value) != containerSet.end();
+}
+
+status_t DepthCompositeStream::checkAndGetMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizes,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ size_t *depthWidth, size_t *depthHeight) {
+ if (depthWidth == nullptr || depthHeight == nullptr) {
+ return BAD_VALUE;
+ }
+ size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
+ bool hasDefaultSensorPixelMode =
+ setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
+
+ bool hasMaximumResolutionSensorPixelMode =
+ setContains(sensorPixelModesUsed, ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
+
+ if (!hasDefaultSensorPixelMode && !hasMaximumResolutionSensorPixelMode) {
+ ALOGE("%s: sensor pixel modes don't contain either maximum resolution or default modes",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (hasDefaultSensorPixelMode) {
+ auto ret = getMatchingDepthSize(width, height, depthSizes, &chosenDepthWidth,
+ &chosenDepthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ return ret;
+ }
+ }
+
+ if (hasMaximumResolutionSensorPixelMode) {
+ size_t depthWidth = 0, depthHeight = 0;
+ auto ret = getMatchingDepthSize(width, height,
+ depthSizesMaximumResolution, &depthWidth, &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching max resolution depth stream size found", __FUNCTION__);
+ return ret;
+ }
+ // Both matching depth sizes should be the same.
+ if (chosenDepthWidth != 0 && (chosenDepthWidth != depthWidth ||
+ chosenDepthHeight != depthHeight)) {
+ ALOGE("%s: Maximum resolution sensor pixel mode and default sensor pixel mode don't"
+ " have matching depth sizes", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (chosenDepthWidth == 0) {
+ chosenDepthWidth = depthWidth;
+ chosenDepthHeight = depthHeight;
+ }
+ }
+ *depthWidth = chosenDepthWidth;
+ *depthHeight = chosenDepthHeight;
+ return OK;
+}
+
+
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int /*streamSetId*/, bool /*isShared*/) {
if (mSupportedDepthSizes.empty()) {
ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
return INVALID_OPERATION;
}
size_t depthWidth, depthHeight;
- auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
+ auto ret =
+ checkAndGetMatchingDepthSize(width, height, mSupportedDepthSizes,
+ mSupportedDepthSizesMaximumResolution, sensorPixelModesUsed, &depthWidth,
+ &depthHeight);
if (ret != OK) {
ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
return ret;
@@ -515,7 +584,7 @@
mBlobSurface = new Surface(producer);
ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
- id, physicalCameraId, surfaceIds);
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
if (ret == OK) {
mBlobStreamId = *id;
mBlobSurfaceId = (*surfaceIds)[0];
@@ -531,7 +600,8 @@
mDepthSurface = new Surface(producer);
std::vector<int> depthSurfaceId;
ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
- kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
+ kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
+ &depthSurfaceId);
if (ret == OK) {
mDepthSurfaceId = depthSurfaceId[0];
} else {
@@ -749,13 +819,15 @@
return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
}
-void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
+void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
if (depthSizes == nullptr) {
return;
}
- auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ auto entry = ch.find(
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution));
if (entry.count > 0) {
// Depth stream dimensions have four int32_t components
// (pixelformat, width, height, type)
@@ -779,30 +851,43 @@
}
std::vector<std::tuple<size_t, size_t>> depthSizes;
- getSupportedDepthSizes(ch, &depthSizes);
+ std::vector<std::tuple<size_t, size_t>> depthSizesMaximumResolution;
+ getSupportedDepthSizes(ch, /*maxResolution*/false, &depthSizes);
if (depthSizes.empty()) {
ALOGE("%s: No depth stream configurations present", __FUNCTION__);
return BAD_VALUE;
}
- size_t depthWidth, depthHeight;
- auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
- &depthHeight);
+ if (SessionConfigurationUtils::isUltraHighResolutionSensor(ch)) {
+ getSupportedDepthSizes(ch, /*maxResolution*/true, &depthSizesMaximumResolution);
+ if (depthSizesMaximumResolution.empty()) {
+ ALOGE("%s: No depth stream configurations for maximum resolution present",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+ }
+
+ size_t chosenDepthWidth = 0, chosenDepthHeight = 0;
+ auto ret = checkAndGetMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes,
+ depthSizesMaximumResolution, streamInfo.sensorPixelModesUsed, &chosenDepthWidth,
+ &chosenDepthHeight);
+
if (ret != OK) {
- ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ ALOGE("%s: Couldn't get matching depth sizes", __FUNCTION__);
return ret;
}
compositeOutput->clear();
compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+ // Sensor pixel modes should stay the same here. They're already overridden.
// Jpeg/Blob stream info
(*compositeOutput)[0].dataSpace = kJpegDataSpace;
(*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
// Depth stream info
- (*compositeOutput)[1].width = depthWidth;
- (*compositeOutput)[1].height = depthHeight;
+ (*compositeOutput)[1].width = chosenDepthWidth;
+ (*compositeOutput)[1].height = chosenDepthHeight;
(*compositeOutput)[1].format = kDepthMapPixelFormat;
(*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
(*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index 05bc504..a520bbf 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -51,7 +51,9 @@
status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) override;
status_t deleteInternalStreams() override;
status_t configureStream() override;
status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
@@ -86,11 +88,17 @@
};
// Helper methods
- static void getSupportedDepthSizes(const CameraMetadata& ch,
+ static void getSupportedDepthSizes(const CameraMetadata& ch, bool maxResolution,
std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/);
static status_t getMatchingDepthSize(size_t width, size_t height,
const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+ static status_t checkAndGetMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizes,
+ const std::vector<std::tuple<size_t, size_t>> &depthSizesMaximumResolution,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+
// Dynamic depth processing
status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
@@ -126,6 +134,7 @@
ssize_t mMaxJpegSize;
std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
+ std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizesMaximumResolution;
std::vector<float> mIntrinsicCalibration, mLensDistortion;
bool mIsLogicalCamera;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 7d68485..582001d 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -36,6 +36,7 @@
#include "common/CameraDeviceBase.h"
#include "utils/ExifUtils.h"
+#include "utils/SessionConfigurationUtils.h"
#include "HeicEncoderInfoManager.h"
#include "HeicCompositeStream.h"
@@ -115,7 +116,9 @@
status_t HeicCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int /*streamSetId*/, bool /*isShared*/) {
sp<CameraDeviceBase> device = mDevice.promote();
if (!device.get()) {
@@ -141,7 +144,8 @@
mStaticInfo = device->info();
res = device->createStream(mAppSegmentSurface, mAppSegmentMaxSize, 1, format,
- kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId, surfaceIds);
+ kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId,
+ sensorPixelModesUsed, surfaceIds);
if (res == OK) {
mAppSegmentSurfaceId = (*surfaceIds)[0];
} else {
@@ -177,7 +181,7 @@
int srcStreamFmt = mUseGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
res = device->createStream(mMainImageSurface, width, height, srcStreamFmt, kHeifDataSpace,
- rotation, id, physicalCameraId, &sourceSurfaceId);
+ rotation, id, physicalCameraId, sensorPixelModesUsed, &sourceSurfaceId);
if (res == OK) {
mMainImageSurfaceId = sourceSurfaceId[0];
mMainImageStreamId = *id;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index cbd9d21..1077a1f 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -46,7 +46,9 @@
status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared) override;
status_t deleteInternalStreams() override;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 5acbb99..85b0cc2 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -164,6 +164,7 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, bool isMultiResolution = false,
@@ -180,6 +181,7 @@
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, bool isMultiResolution = false,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index dfe2409..62fc18f 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -686,9 +686,39 @@
}
}
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addDynamicDepthTags() {
- uint32_t depthExclTag = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE;
- uint32_t depthSizesTag = ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS;
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addDynamicDepthTags(
+ bool maxResolution) {
+ const int32_t depthExclTag = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE;
+
+ const int32_t scalerSizesTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t scalerMinFrameDurationsTag = camera3::SessionConfigurationUtils::
+ getAppropriateModeTag(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, maxResolution);
+ const int32_t scalerStallDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, maxResolution);
+
+ const int32_t depthSizesTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t depthStallDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS, maxResolution);
+ const int32_t depthMinFrameDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS, maxResolution);
+
+ const int32_t dynamicDepthSizesTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t dynamicDepthStallDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS, maxResolution);
+ const int32_t dynamicDepthMinFrameDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS, maxResolution);
+
auto& c = mCameraCharacteristics;
std::vector<std::tuple<size_t, size_t>> supportedBlobSizes, supportedDepthSizes,
supportedDynamicDepthSizes, internalDepthSizes;
@@ -718,7 +748,7 @@
return BAD_VALUE;
}
- getSupportedSizes(c, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, HAL_PIXEL_FORMAT_BLOB,
+ getSupportedSizes(c, scalerSizesTag, HAL_PIXEL_FORMAT_BLOB,
&supportedBlobSizes);
getSupportedSizes(c, depthSizesTag, HAL_PIXEL_FORMAT_Y16, &supportedDepthSizes);
if (supportedBlobSizes.empty() || supportedDepthSizes.empty()) {
@@ -745,10 +775,10 @@
std::vector<int64_t> blobMinDurations, blobStallDurations;
std::vector<int64_t> dynamicDepthMinDurations, dynamicDepthStallDurations;
- getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
- HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthMinDurations);
- getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobMinDurations);
+ getSupportedDurations(c, depthMinFrameDurationsTag, HAL_PIXEL_FORMAT_Y16, internalDepthSizes,
+ &depthMinDurations);
+ getSupportedDurations(c, scalerMinFrameDurationsTag, HAL_PIXEL_FORMAT_BLOB,
+ supportedDynamicDepthSizes, &blobMinDurations);
if (blobMinDurations.empty() || depthMinDurations.empty() ||
(depthMinDurations.size() != blobMinDurations.size())) {
ALOGE("%s: Unexpected number of available depth min durations! %zu vs. %zu",
@@ -756,10 +786,10 @@
return BAD_VALUE;
}
- getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
- HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthStallDurations);
- getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobStallDurations);
+ getSupportedDurations(c, depthStallDurationsTag, HAL_PIXEL_FORMAT_Y16, internalDepthSizes,
+ &depthStallDurations);
+ getSupportedDurations(c, scalerStallDurationsTag, HAL_PIXEL_FORMAT_BLOB,
+ supportedDynamicDepthSizes, &blobStallDurations);
if (blobStallDurations.empty() || depthStallDurations.empty() ||
(depthStallDurations.size() != blobStallDurations.size())) {
ALOGE("%s: Unexpected number of available depth stall durations! %zu vs. %zu",
@@ -804,15 +834,14 @@
supportedChTags.reserve(chTags.count + 3);
supportedChTags.insert(supportedChTags.end(), chTags.data.i32,
chTags.data.i32 + chTags.count);
- supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS);
- supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS);
- supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
- c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
- dynamicDepthEntries.data(), dynamicDepthEntries.size());
- c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS,
- dynamicDepthMinDurationEntries.data(), dynamicDepthMinDurationEntries.size());
- c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS,
- dynamicDepthStallDurationEntries.data(), dynamicDepthStallDurationEntries.size());
+ supportedChTags.push_back(dynamicDepthSizesTag);
+ supportedChTags.push_back(dynamicDepthMinFrameDurationsTag);
+ supportedChTags.push_back(dynamicDepthStallDurationsTag);
+ c.update(dynamicDepthSizesTag, dynamicDepthEntries.data(), dynamicDepthEntries.size());
+ c.update(dynamicDepthMinFrameDurationsTag, dynamicDepthMinDurationEntries.data(),
+ dynamicDepthMinDurationEntries.size());
+ c.update(dynamicDepthStallDurationsTag, dynamicDepthStallDurationEntries.data(),
+ dynamicDepthStallDurationEntries.size());
c.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, supportedChTags.data(),
supportedChTags.size());
@@ -1046,7 +1075,24 @@
return OK;
}
-status_t CameraProviderManager::ProviderInfo::DeviceInfo3::deriveHeicTags() {
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::deriveHeicTags(bool maxResolution) {
+ int32_t scalerStreamSizesTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxResolution);
+ int32_t scalerMinFrameDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, maxResolution);
+
+ int32_t heicStreamSizesTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, maxResolution);
+ int32_t heicMinFrameDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS, maxResolution);
+ int32_t heicStallDurationsTag =
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS, maxResolution);
+
auto& c = mCameraCharacteristics;
camera_metadata_entry halHeicSupport = c.find(ANDROID_HEIC_INFO_SUPPORTED);
@@ -1075,10 +1121,8 @@
std::vector<int64_t> heicDurations;
std::vector<int64_t> heicStallDurations;
- camera_metadata_entry halStreamConfigs =
- c.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- camera_metadata_entry minFrameDurations =
- c.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+ camera_metadata_entry halStreamConfigs = c.find(scalerStreamSizesTag);
+ camera_metadata_entry minFrameDurations = c.find(scalerMinFrameDurationsTag);
status_t res = fillHeicStreamCombinations(&heicOutputs, &heicDurations, &heicStallDurations,
halStreamConfigs, minFrameDurations);
@@ -1088,12 +1132,9 @@
return res;
}
- c.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS,
- heicOutputs.data(), heicOutputs.size());
- c.update(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS,
- heicDurations.data(), heicDurations.size());
- c.update(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS,
- heicStallDurations.data(), heicStallDurations.size());
+ c.update(heicStreamSizesTag, heicOutputs.data(), heicOutputs.size());
+ c.update(heicMinFrameDurationsTag, heicDurations.data(), heicDurations.size());
+ c.update(heicStallDurationsTag, heicStallDurations.data(), heicStallDurations.size());
return OK;
}
@@ -2005,16 +2046,17 @@
size_t numStreams = halCameraIdsAndStreamCombinations.size();
halCameraIdsAndStreamCombinations_2_6.resize(numStreams);
for (size_t i = 0; i < numStreams; i++) {
+ using namespace camera3;
auto const& combination = halCameraIdsAndStreamCombinations[i];
halCameraIdsAndStreamCombinations_2_6[i].cameraId = combination.cameraId;
bool success =
SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
- halCameraIdsAndStreamCombinations_2_6[i].streamConfiguration,
- combination.streamConfiguration);
+ halCameraIdsAndStreamCombinations_2_6[i].streamConfiguration,
+ combination.streamConfiguration);
if (!success) {
*isSupported = false;
return OK;
}
}
ret = interface_2_6->isConcurrentStreamCombinationSupported(
halCameraIdsAndStreamCombinations_2_6, cb);
@@ -2220,6 +2265,21 @@
ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+
+ if (camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(mCameraCharacteristics)) {
+ status_t status = addDynamicDepthTags(/*maxResolution*/true);
+ if (OK != status) {
+ ALOGE("%s: Failed appending dynamic depth tags for maximum resolution mode: %s (%d)",
+ __FUNCTION__, strerror(-status), status);
+ }
+
+ status = deriveHeicTags(/*maxResolution*/true);
+ if (OK != status) {
+ ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities for"
+ "maximum resolution mode: %s (%d)", __FUNCTION__, strerror(-status), status);
+ }
+ }
+
res = addRotateCropTags();
if (OK != res) {
ALOGE("%s: Unable to add default SCALER_ROTATE_AND_CROP tags: %s (%d)", __FUNCTION__,
@@ -2426,26 +2486,22 @@
status_t res;
Status callStatus;
::android::hardware::Return<void> ret;
- if (interface_3_7 != nullptr) {
- ret = interface_3_7->isStreamCombinationSupported_3_7(configuration,
+ auto halCb =
[&callStatus, &status] (Status s, bool combStatus) {
callStatus = s;
*status = combStatus;
- });
+ };
+ if (interface_3_7 != nullptr) {
+ ret = interface_3_7->isStreamCombinationSupported_3_7(configuration, halCb);
} else if (interface_3_5 != nullptr) {
hardware::camera::device::V3_4::StreamConfiguration configuration_3_4;
- bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+ bool success = camera3::SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
configuration_3_4, configuration);
if (!success) {
*status = false;
return OK;
}
-
- ret = interface_3_5->isStreamCombinationSupported(configuration_3_4,
- [&callStatus, &status] (Status s, bool combStatus) {
- callStatus = s;
- *status = combStatus;
- });
+ ret = interface_3_5->isStreamCombinationSupported(configuration_3_4, halCb);
} else {
return INVALID_OPERATION;
}
@@ -2829,7 +2885,7 @@
if (res != OK) {
return res;
}
- metadataGetter getMetadata =
+ camera3::metadataGetter getMetadata =
[this](const String8 &id) {
CameraMetadata physicalDeviceInfo;
getCameraCharacteristicsLocked(id.string(), &physicalDeviceInfo);
@@ -2838,7 +2894,7 @@
std::vector<std::string> physicalCameraIds;
isLogicalCameraLocked(cameraIdAndSessionConfig.mCameraId, &physicalCameraIds);
bStatus =
- SessionConfigurationUtils::convertToHALStreamCombination(
+ camera3::SessionConfigurationUtils::convertToHALStreamCombination(
cameraIdAndSessionConfig.mSessionConfiguration,
String8(cameraIdAndSessionConfig.mCameraId.c_str()), deviceInfo, getMetadata,
physicalCameraIds, streamConfiguration, &shouldExit);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index fa9cc1c..12bda9b 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -556,8 +556,8 @@
void queryPhysicalCameraIds();
SystemCameraKind getSystemCameraKind();
status_t fixupMonochromeTags();
- status_t addDynamicDepthTags();
- status_t deriveHeicTags();
+ status_t addDynamicDepthTags(bool maxResolution = false);
+ status_t deriveHeicTags(bool maxResolution = false);
status_t addRotateCropTags();
status_t addPreCorrectionActiveArraySize();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 35a06d8..00983f2 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -60,6 +60,7 @@
#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
#include "utils/CameraThreadState.h"
+#include "utils/SessionConfigurationUtils.h"
#include "utils/TraceHFR.h"
#include "utils/CameraServiceProxyWrapper.h"
@@ -69,6 +70,7 @@
using namespace android::camera3;
using namespace android::hardware::camera;
using namespace android::hardware::camera::device::V3_2;
+using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
namespace android {
@@ -489,8 +491,13 @@
const int STREAM_WIDTH_OFFSET = 1;
const int STREAM_HEIGHT_OFFSET = 2;
const int STREAM_IS_INPUT_OFFSET = 3;
+ bool isHighResolutionSensor =
+ camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(mDeviceInfo);
+ int32_t scalerSizesTag = isHighResolutionSensor ?
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION :
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS;
camera_metadata_ro_entry_t availableStreamConfigs =
- mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ mDeviceInfo.find(scalerSizesTag);
if (availableStreamConfigs.count == 0 ||
availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
return camera3::Size(0, 0);
@@ -628,6 +635,8 @@
ssize_t jpegBufferSize = scaleFactor * (maxJpegBufferSize - kMinJpegBufferSize) +
kMinJpegBufferSize;
if (jpegBufferSize > maxJpegBufferSize) {
+ ALOGI("%s: jpeg buffer size calculated is > maxJpeg bufferSize(%zd), clamping",
+ __FUNCTION__, maxJpegBufferSize);
jpegBufferSize = maxJpegBufferSize;
}
@@ -647,13 +656,17 @@
return maxBytesForPointCloud;
}
-ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height,
+ bool maxResolution) const {
const int PER_CONFIGURATION_SIZE = 3;
const int WIDTH_OFFSET = 0;
const int HEIGHT_OFFSET = 1;
const int SIZE_OFFSET = 2;
camera_metadata_ro_entry rawOpaqueSizes =
- mDeviceInfo.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+ mDeviceInfo.find(
+ camera3::SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SENSOR_OPAQUE_RAW_SIZE,
+ maxResolution));
size_t count = rawOpaqueSizes.count;
if (count == 0 || (count % PER_CONFIGURATION_SIZE)) {
ALOGE("%s: Camera %s: bad opaque RAW size static metadata length(%zu)!",
@@ -1325,8 +1338,9 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
- std::vector<int> *surfaceIds, int streamSetId, bool isShared,
- bool isMultiResolution, uint64_t consumerUsage) {
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
+ uint64_t consumerUsage) {
ATRACE_CALL();
if (consumer == nullptr) {
@@ -1338,14 +1352,26 @@
consumers.push_back(consumer);
return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
- format, dataSpace, rotation, id, physicalCameraId, surfaceIds, streamSetId,
- isShared, isMultiResolution, consumerUsage);
+ format, dataSpace, rotation, id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
+ streamSetId, isShared, isMultiResolution, consumerUsage);
+}
+
+static bool isRawFormat(int format) {
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RAW16:
+ case HAL_PIXEL_FORMAT_RAW12:
+ case HAL_PIXEL_FORMAT_RAW10:
+ case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+ return true;
+ default:
+ return false;
+ }
}
status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
- const String8& physicalCameraId,
+ const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
uint64_t consumerUsage) {
ATRACE_CALL();
@@ -1399,6 +1425,12 @@
return BAD_VALUE;
}
+ if (isRawFormat(format) && sensorPixelModesUsed.size() > 1) {
+ // We can't use one stream with a raw format in both sensor pixel modes since it's going to
+ // be found in only one sensor pixel mode.
+ ALOGE("%s: RAW format stream cannot be used with > 1 sensor pixel modes", __FUNCTION__);
+ return BAD_VALUE;
+ }
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
if (dataSpace == HAL_DATASPACE_DEPTH) {
@@ -1418,28 +1450,36 @@
}
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
- mTimestampOffset, physicalCameraId, streamSetId, isMultiResolution);
+ mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
+ isMultiResolution);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
- ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height);
+ bool maxResolution =
+ sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
+ sensorPixelModesUsed.end();
+ ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height, maxResolution);
if (rawOpaqueBufferSize <= 0) {
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
}
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
- mTimestampOffset, physicalCameraId, streamSetId, isMultiResolution);
+ mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
+ isMultiResolution);
} else if (isShared) {
newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
width, height, format, consumerUsage, dataSpace, rotation,
- mTimestampOffset, physicalCameraId, streamSetId, mUseHalBufManager);
+ mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
+ mUseHalBufManager);
} else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
- mTimestampOffset, physicalCameraId, streamSetId, isMultiResolution);
+ mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
+ isMultiResolution);
} else {
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
- mTimestampOffset, physicalCameraId, streamSetId, isMultiResolution);
+ mTimestampOffset, physicalCameraId, sensorPixelModesUsed, streamSetId,
+ isMultiResolution);
}
size_t consumerCount = consumers.size();
@@ -3212,7 +3252,7 @@
dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
dst3_2.rotation = mapToStreamRotation((camera_stream_rotation_t) src->rotation);
// For HidlSession version 3.5 or newer, the format and dataSpace sent
- // to HAL are original, not the overriden ones.
+ // to HAL are original, not the overridden ones.
if (mHidlSession_3_5 != nullptr) {
dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden() ?
cam3stream->getOriginalFormat() : src->format);
@@ -3229,7 +3269,12 @@
}
dst3_7.v3_4 = dst3_4;
dst3_7.groupId = cam3stream->getHalStreamGroupId();
-
+ dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
+ size_t j = 0;
+ for (int mode : src->sensor_pixel_modes_used) {
+ dst3_7.sensorPixelModesUsed[j++] =
+ static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
+ }
activeStreams.insert(streamId);
// Create Buffer ID map if necessary
mBufferRecords.tryCreateBufferCache(streamId);
@@ -3246,13 +3291,15 @@
}
requestedConfiguration3_2.operationMode = operationMode;
requestedConfiguration3_4.operationMode = operationMode;
+ requestedConfiguration3_7.operationMode = operationMode;
+ size_t sessionParamSize = get_camera_metadata_size(sessionParams);
requestedConfiguration3_4.sessionParams.setToExternal(
reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
- get_camera_metadata_size(sessionParams));
+ sessionParamSize);
requestedConfiguration3_7.operationMode = operationMode;
requestedConfiguration3_7.sessionParams.setToExternal(
reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
- get_camera_metadata_size(sessionParams));
+ sessionParamSize);
// Invoke configureStreams
device::V3_3::HalStreamConfiguration finalConfiguration;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 018dbe5..fccf384 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -132,14 +132,17 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, bool isMultiResolution = false,
uint64_t consumerUsage = 0) override;
+
status_t createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, bool isMultiResolution = false,
@@ -190,7 +193,7 @@
ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
ssize_t getPointCloudBufferSize() const;
- ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height) const;
+ ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height, bool maxResolution) const;
// Methods called by subclasses
void notifyStatus(bool idle); // updates from StatusTracker
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index 2196c7d..8cc6833 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -31,7 +31,7 @@
Camera3FakeStream::Camera3FakeStream(int id) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, FAKE_WIDTH, FAKE_HEIGHT,
/*maxSize*/0, FAKE_FORMAT, FAKE_DATASPACE, FAKE_ROTATION,
- FAKE_ID) {
+ FAKE_ID, std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT}) {
}
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index a837900..0204d49 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -32,10 +32,12 @@
Camera3IOStreamBase::Camera3IOStreamBase(int id, camera_stream_type_t type,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
- const String8& physicalCameraId, int setId, bool isMultiResolution) :
+ const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ int setId, bool isMultiResolution) :
Camera3Stream(id, type,
width, height, maxSize, format, dataSpace, rotation,
- physicalCameraId, setId, isMultiResolution),
+ physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
mTotalBufferCount(0),
mHandoutTotalBufferCount(0),
mHandoutOutputBufferCount(0),
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 2e744ee..90c8a7b 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -36,6 +36,7 @@
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
public:
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index b00a963..6d8317b 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -33,7 +33,8 @@
uint32_t width, uint32_t height, int format) :
Camera3IOStreamBase(id, CAMERA_STREAM_INPUT, width, height, /*maxSize*/0,
format, HAL_DATASPACE_UNKNOWN, CAMERA_STREAM_ROTATION_0,
- FAKE_ID) {
+ FAKE_ID,
+ std::unordered_set<int32_t>{ANDROID_SENSOR_PIXEL_MODE_DEFAULT}) {
if (format == HAL_PIXEL_FORMAT_BLOB) {
ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 3ec3b6b..221bebb 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -44,10 +44,11 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
- physicalCameraId, setId, isMultiResolution),
+ physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
@@ -70,11 +71,12 @@
sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
- nsecs_t timestampOffset, const String8& physicalCameraId, int setId,
- bool isMultiResolution) :
+ nsecs_t timestampOffset, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ int setId, bool isMultiResolution) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
- format, dataSpace, rotation, physicalCameraId, setId,
- isMultiResolution),
+ format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
+ setId, isMultiResolution),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
@@ -104,10 +106,12 @@
uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
camera_stream_rotation_t rotation, nsecs_t timestampOffset,
- const String8& physicalCameraId, int setId, bool isMultiResolution) :
+ const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ int setId, bool isMultiResolution) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
- physicalCameraId, setId, isMultiResolution),
+ physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
mConsumer(nullptr),
mTransform(0),
mTraceFirstBuffer(true),
@@ -142,12 +146,13 @@
android_dataspace dataSpace,
camera_stream_rotation_t rotation,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
uint64_t consumerUsage, nsecs_t timestampOffset,
int setId, bool isMultiResolution) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
format, dataSpace, rotation,
- physicalCameraId, setId, isMultiResolution),
+ physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution),
mTransform(0),
mTraceFirstBuffer(true),
mUseMonoTimestamp(false),
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index c82f2a6..00e4854 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -87,8 +87,8 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
-
/**
* Set up a stream for formats that have a variable buffer size for the same
* dimensions, such as compressed JPEG.
@@ -99,8 +99,8 @@
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
-
/**
* Set up a stream with deferred consumer for formats that have 2 dimensions, such as
* RAW and YUV. The consumer must be set before using this stream for output. A valid
@@ -110,6 +110,7 @@
uint64_t consumerUsage, android_dataspace dataSpace,
camera_stream_rotation_t rotation, nsecs_t timestampOffset,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
virtual ~Camera3OutputStream();
@@ -234,6 +235,7 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false);
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 8aa5f1a..15cf7f4 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -32,9 +32,10 @@
uint64_t consumerUsage, android_dataspace dataSpace,
camera_stream_rotation_t rotation,
nsecs_t timestampOffset, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool useHalBufManager) :
Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
- format, dataSpace, rotation, physicalCameraId,
+ format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
consumerUsage, timestampOffset, setId),
mUseHalBufManager(useHalBufManager) {
size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index a61316c..4b6341b 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -38,6 +38,7 @@
uint64_t consumerUsage, android_dataspace dataSpace,
camera_stream_rotation_t rotation, nsecs_t timestampOffset,
const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID,
bool useHalBufManager = false);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index c6e7002..02b6585 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -49,7 +49,9 @@
camera_stream_type type,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
- const String8& physicalCameraId, int setId, bool isMultiResolution) :
+ const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ int setId, bool isMultiResolution) :
camera_stream(),
mId(id),
mSetId(setId),
@@ -84,6 +86,7 @@
camera_stream::rotation = rotation;
camera_stream::max_buffers = 0;
camera_stream::physical_camera_id = mPhysicalCameraId.string();
+ camera_stream::sensor_pixel_modes_used = sensorPixelModesUsed;
if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
maxSize == 0) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 45d8478..5a364ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -498,7 +498,9 @@
Camera3Stream(int id, camera_stream_type type,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera_stream_rotation_t rotation,
- const String8& physicalCameraId, int setId, bool isMultiResolution);
+ const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ int setId, bool isMultiResolution);
wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index a567cb4..ca80595 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -62,6 +62,8 @@
android_dataspace_t data_space;
camera_stream_rotation_t rotation;
const char* physical_camera_id;
+
+ std::unordered_set<int32_t> sensor_pixel_modes_used;
} camera_stream_t;
typedef struct camera_stream_buffer {
@@ -104,13 +106,15 @@
uint64_t consumerUsage;
bool finalized = false;
bool supportsOffline = false;
+ std::unordered_set<int32_t> sensorPixelModesUsed;
OutputStreamInfo() :
width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
consumerUsage(0) {}
OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
- uint64_t _consumerUsage) :
+ uint64_t _consumerUsage, const std::unordered_set<int32_t>& _sensorPixelModesUsed) :
width(_width), height(_height), format(_format),
- dataSpace(_dataSpace), consumerUsage(_consumerUsage) {}
+ dataSpace(_dataSpace), consumerUsage(_consumerUsage),
+ sensorPixelModesUsed(_sensorPixelModesUsed) {}
};
/**
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index 316303e..89dd115 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -22,13 +22,14 @@
#include <cmath>
#include "device3/DistortionMapper.h"
+#include "utils/SessionConfigurationUtils.h"
namespace android {
namespace camera3 {
-DistortionMapper::DistortionMapper() : mValidMapping(false), mValidGrids(false) {
+DistortionMapper::DistortionMapper() {
initRemappedKeys();
}
@@ -61,41 +62,81 @@
status_t DistortionMapper::setupStaticInfo(const CameraMetadata &deviceInfo) {
std::lock_guard<std::mutex> lock(mMutex);
+ status_t res = setupStaticInfoLocked(deviceInfo, /*maxResolution*/false);
+ if (res != OK) {
+ return res;
+ }
+
+ mMaxResolution = SessionConfigurationUtils::isUltraHighResolutionSensor(deviceInfo);
+ if (mMaxResolution) {
+ res = setupStaticInfoLocked(deviceInfo, /*maxResolution*/true);
+ }
+ return res;
+}
+
+status_t DistortionMapper::setupStaticInfoLocked(const CameraMetadata &deviceInfo,
+ bool maxResolution) {
+ DistortionMapperInfo *mapperInfo = maxResolution ? &mDistortionMapperInfoMaximumResolution :
+ &mDistortionMapperInfo;
+
camera_metadata_ro_entry_t array;
- array = deviceInfo.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
+ array = deviceInfo.find(
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, maxResolution));
if (array.count != 4) return BAD_VALUE;
float arrayX = static_cast<float>(array.data.i32[0]);
float arrayY = static_cast<float>(array.data.i32[1]);
- mArrayWidth = static_cast<float>(array.data.i32[2]);
- mArrayHeight = static_cast<float>(array.data.i32[3]);
+ mapperInfo->mArrayWidth = static_cast<float>(array.data.i32[2]);
+ mapperInfo->mArrayHeight = static_cast<float>(array.data.i32[3]);
- array = deviceInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+ array = deviceInfo.find(
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, maxResolution));
if (array.count != 4) return BAD_VALUE;
float activeX = static_cast<float>(array.data.i32[0]);
float activeY = static_cast<float>(array.data.i32[1]);
- mActiveWidth = static_cast<float>(array.data.i32[2]);
- mActiveHeight = static_cast<float>(array.data.i32[3]);
+ mapperInfo->mActiveWidth = static_cast<float>(array.data.i32[2]);
+ mapperInfo->mActiveHeight = static_cast<float>(array.data.i32[3]);
- mArrayDiffX = activeX - arrayX;
- mArrayDiffY = activeY - arrayY;
+ mapperInfo->mArrayDiffX = activeX - arrayX;
+ mapperInfo->mArrayDiffY = activeY - arrayY;
- return updateCalibration(deviceInfo);
+ return updateCalibration(deviceInfo, /*isStatic*/ true, maxResolution);
+}
+
+static bool doesSettingsHaveMaxResolution(const CameraMetadata *settings) {
+ if (settings == nullptr) {
+ return false;
+ }
+ // First we get the sensorPixelMode from the settings metadata.
+ camera_metadata_ro_entry sensorPixelModeEntry = settings->find(ANDROID_SENSOR_PIXEL_MODE);
+ if (sensorPixelModeEntry.count != 0) {
+ return (sensorPixelModeEntry.data.u8[0] == ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
+ }
+ return false;
}
bool DistortionMapper::calibrationValid() const {
std::lock_guard<std::mutex> lock(mMutex);
-
- return mValidMapping;
+ bool isValid = mDistortionMapperInfo.mValidMapping;
+ if (mMaxResolution) {
+ isValid = isValid && mDistortionMapperInfoMaximumResolution.mValidMapping;
+ }
+ return isValid;
}
status_t DistortionMapper::correctCaptureRequest(CameraMetadata *request) {
std::lock_guard<std::mutex> lock(mMutex);
status_t res;
- if (!mValidMapping) return OK;
+ bool maxResolution = doesSettingsHaveMaxResolution(request);
+ DistortionMapperInfo *mapperInfo = maxResolution ? &mDistortionMapperInfoMaximumResolution :
+ &mDistortionMapperInfo;
+
+ if (!mapperInfo->mValidMapping) return OK;
camera_metadata_entry_t e;
e = request->find(ANDROID_DISTORTION_CORRECTION_MODE);
@@ -107,27 +148,30 @@
if (weight == 0) {
continue;
}
- res = mapCorrectedToRaw(e.data.i32 + j, 2, /*clamp*/true);
+ res = mapCorrectedToRaw(e.data.i32 + j, 2, mapperInfo, /*clamp*/true);
if (res != OK) return res;
}
}
for (auto rect : kRectsToCorrect) {
e = request->find(rect);
- res = mapCorrectedRectToRaw(e.data.i32, e.count / 4, /*clamp*/true);
+ res = mapCorrectedRectToRaw(e.data.i32, e.count / 4, mapperInfo, /*clamp*/true);
if (res != OK) return res;
}
}
-
return OK;
}
status_t DistortionMapper::correctCaptureResult(CameraMetadata *result) {
std::lock_guard<std::mutex> lock(mMutex);
+
+ bool maxResolution = doesSettingsHaveMaxResolution(result);
+ DistortionMapperInfo *mapperInfo = maxResolution ? &mDistortionMapperInfoMaximumResolution :
+ &mDistortionMapperInfo;
status_t res;
- if (!mValidMapping) return OK;
+ if (!mapperInfo->mValidMapping) return OK;
- res = updateCalibration(*result);
+ res = updateCalibration(*result, /*isStatic*/ false, maxResolution);
if (res != OK) {
ALOGE("Failure to update lens calibration information");
return INVALID_OPERATION;
@@ -143,18 +187,18 @@
if (weight == 0) {
continue;
}
- res = mapRawToCorrected(e.data.i32 + j, 2, /*clamp*/true);
+ res = mapRawToCorrected(e.data.i32 + j, 2, mapperInfo, /*clamp*/true);
if (res != OK) return res;
}
}
for (auto rect : kRectsToCorrect) {
e = result->find(rect);
- res = mapRawRectToCorrected(e.data.i32, e.count / 4, /*clamp*/true);
+ res = mapRawRectToCorrected(e.data.i32, e.count / 4, mapperInfo, /*clamp*/true);
if (res != OK) return res;
}
for (auto pts : kResultPointsToCorrectNoClamp) {
e = result->find(pts);
- res = mapRawToCorrected(e.data.i32, e.count / 2, /*clamp*/false);
+ res = mapRawToCorrected(e.data.i32, e.count / 2, mapperInfo, /*clamp*/false);
if (res != OK) return res;
}
}
@@ -164,25 +208,37 @@
// Utility methods; not guarded by mutex
-status_t DistortionMapper::updateCalibration(const CameraMetadata &result) {
+status_t DistortionMapper::updateCalibration(const CameraMetadata &result, bool isStatic,
+ bool maxResolution) {
camera_metadata_ro_entry_t calib, distortion;
+ DistortionMapperInfo *mapperInfo =
+ maxResolution ? &mDistortionMapperInfoMaximumResolution : &mDistortionMapperInfo;
+ // We only need maximum resolution version of LENS_INTRINSIC_CALIBRATION and
+ // LENS_DISTORTION since CaptureResults would still use the same key
+ // regardless of sensor pixel mode.
+ int calibrationKey =
+ SessionConfigurationUtils::getAppropriateModeTag(ANDROID_LENS_INTRINSIC_CALIBRATION,
+ maxResolution && isStatic);
+ int distortionKey =
+ SessionConfigurationUtils::getAppropriateModeTag(ANDROID_LENS_DISTORTION,
+ maxResolution && isStatic);
- calib = result.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
- distortion = result.find(ANDROID_LENS_DISTORTION);
+ calib = result.find(calibrationKey);
+ distortion = result.find(distortionKey);
if (calib.count != 5) return BAD_VALUE;
if (distortion.count != 5) return BAD_VALUE;
// Skip redoing work if no change to calibration fields
- if (mValidMapping &&
- mFx == calib.data.f[0] &&
- mFy == calib.data.f[1] &&
- mCx == calib.data.f[2] &&
- mCy == calib.data.f[3] &&
- mS == calib.data.f[4]) {
+ if (mapperInfo->mValidMapping &&
+ mapperInfo->mFx == calib.data.f[0] &&
+ mapperInfo->mFy == calib.data.f[1] &&
+ mapperInfo->mCx == calib.data.f[2] &&
+ mapperInfo->mCy == calib.data.f[3] &&
+ mapperInfo->mS == calib.data.f[4]) {
bool noChange = true;
for (size_t i = 0; i < distortion.count; i++) {
- if (mK[i] != distortion.data.f[i]) {
+ if (mapperInfo->mK[i] != distortion.data.f[i]) {
noChange = false;
break;
}
@@ -190,39 +246,39 @@
if (noChange) return OK;
}
- mFx = calib.data.f[0];
- mFy = calib.data.f[1];
- mCx = calib.data.f[2];
- mCy = calib.data.f[3];
- mS = calib.data.f[4];
+ mapperInfo->mFx = calib.data.f[0];
+ mapperInfo->mFy = calib.data.f[1];
+ mapperInfo->mCx = calib.data.f[2];
+ mapperInfo->mCy = calib.data.f[3];
+ mapperInfo->mS = calib.data.f[4];
- mInvFx = 1 / mFx;
- mInvFy = 1 / mFy;
+ mapperInfo->mInvFx = 1 / mapperInfo->mFx;
+ mapperInfo->mInvFy = 1 / mapperInfo->mFy;
for (size_t i = 0; i < distortion.count; i++) {
- mK[i] = distortion.data.f[i];
+ mapperInfo->mK[i] = distortion.data.f[i];
}
- mValidMapping = true;
+ mapperInfo->mValidMapping = true;
// Need to recalculate grid
- mValidGrids = false;
+ mapperInfo->mValidGrids = false;
return OK;
}
status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount,
- bool clamp, bool simple) {
- if (!mValidMapping) return INVALID_OPERATION;
+ DistortionMapperInfo *mapperInfo, bool clamp, bool simple) {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
- if (simple) return mapRawToCorrectedSimple(coordPairs, coordCount, clamp);
+ if (simple) return mapRawToCorrectedSimple(coordPairs, coordCount, mapperInfo, clamp);
- if (!mValidGrids) {
- status_t res = buildGrids();
+ if (!mapperInfo->mValidGrids) {
+ status_t res = buildGrids(mapperInfo);
if (res != OK) return res;
}
for (int i = 0; i < coordCount * 2; i += 2) {
- const GridQuad *quad = findEnclosingQuad(coordPairs + i, mDistortedGrid);
+ const GridQuad *quad = findEnclosingQuad(coordPairs + i, mapperInfo->mDistortedGrid);
if (quad == nullptr) {
ALOGE("Raw to corrected mapping failure: No quad found for (%d, %d)",
*(coordPairs + i), *(coordPairs + i + 1));
@@ -258,8 +314,8 @@
// Clamp to within active array
if (clamp) {
- corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
- corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+ corrX = std::min(mapperInfo->mActiveWidth - 1, std::max(0.f, corrX));
+ corrY = std::min(mapperInfo->mActiveHeight - 1, std::max(0.f, corrY));
}
coordPairs[i] = static_cast<int32_t>(std::round(corrX));
@@ -270,19 +326,19 @@
}
status_t DistortionMapper::mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount,
- bool clamp) const {
- if (!mValidMapping) return INVALID_OPERATION;
+ const DistortionMapperInfo *mapperInfo, bool clamp) const {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
- float scaleX = mActiveWidth / mArrayWidth;
- float scaleY = mActiveHeight / mArrayHeight;
+ float scaleX = mapperInfo->mActiveWidth / mapperInfo->mArrayWidth;
+ float scaleY = mapperInfo->mActiveHeight / mapperInfo->mArrayHeight;
for (int i = 0; i < coordCount * 2; i += 2) {
float x = coordPairs[i];
float y = coordPairs[i + 1];
float corrX = x * scaleX;
float corrY = y * scaleY;
if (clamp) {
- corrX = std::min(mActiveWidth - 1, std::max(0.f, corrX));
- corrY = std::min(mActiveHeight - 1, std::max(0.f, corrY));
+ corrX = std::min(mapperInfo->mActiveWidth - 1, std::max(0.f, corrX));
+ corrY = std::min(mapperInfo->mActiveHeight - 1, std::max(0.f, corrY));
}
coordPairs[i] = static_cast<int32_t>(std::round(corrX));
coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
@@ -291,9 +347,9 @@
return OK;
}
-status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
- bool simple) {
- if (!mValidMapping) return INVALID_OPERATION;
+status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount,
+ DistortionMapperInfo *mapperInfo, bool clamp, bool simple) {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
for (int i = 0; i < rectCount * 4; i += 4) {
// Map from (l, t, width, height) to (l, t, r, b)
int32_t coords[4] = {
@@ -303,7 +359,7 @@
rects[i + 1] + rects[i + 3] - 1
};
- mapRawToCorrected(coords, 2, clamp, simple);
+ mapRawToCorrected(coords, 2, mapperInfo, clamp, simple);
// Map back to (l, t, width, height)
rects[i] = coords[0];
@@ -315,60 +371,60 @@
return OK;
}
-status_t DistortionMapper::mapCorrectedToRaw(int32_t *coordPairs, int coordCount, bool clamp,
- bool simple) const {
- return mapCorrectedToRawImpl(coordPairs, coordCount, clamp, simple);
+status_t DistortionMapper::mapCorrectedToRaw(int32_t *coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple) const {
+ return mapCorrectedToRawImpl(coordPairs, coordCount, mapperInfo, clamp, simple);
}
template<typename T>
-status_t DistortionMapper::mapCorrectedToRawImpl(T *coordPairs, int coordCount, bool clamp,
- bool simple) const {
- if (!mValidMapping) return INVALID_OPERATION;
+status_t DistortionMapper::mapCorrectedToRawImpl(T *coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple) const {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
- if (simple) return mapCorrectedToRawImplSimple(coordPairs, coordCount, clamp);
+ if (simple) return mapCorrectedToRawImplSimple(coordPairs, coordCount, mapperInfo, clamp);
- float activeCx = mCx - mArrayDiffX;
- float activeCy = mCy - mArrayDiffY;
+ float activeCx = mapperInfo->mCx - mapperInfo->mArrayDiffX;
+ float activeCy = mapperInfo->mCy - mapperInfo->mArrayDiffY;
for (int i = 0; i < coordCount * 2; i += 2) {
// Move to normalized space from active array space
- float ywi = (coordPairs[i + 1] - activeCy) * mInvFy;
- float xwi = (coordPairs[i] - activeCx - mS * ywi) * mInvFx;
+ float ywi = (coordPairs[i + 1] - activeCy) * mapperInfo->mInvFy;
+ float xwi = (coordPairs[i] - activeCx - mapperInfo->mS * ywi) * mapperInfo->mInvFx;
// Apply distortion model to calculate raw image coordinates
+ const std::array<float, 5> &kK = mapperInfo->mK;
float rSq = xwi * xwi + ywi * ywi;
- float Fr = 1.f + (mK[0] * rSq) + (mK[1] * rSq * rSq) + (mK[2] * rSq * rSq * rSq);
- float xc = xwi * Fr + (mK[3] * 2 * xwi * ywi) + mK[4] * (rSq + 2 * xwi * xwi);
- float yc = ywi * Fr + (mK[4] * 2 * xwi * ywi) + mK[3] * (rSq + 2 * ywi * ywi);
+ float Fr = 1.f + (kK[0] * rSq) + (kK[1] * rSq * rSq) + (kK[2] * rSq * rSq * rSq);
+ float xc = xwi * Fr + (kK[3] * 2 * xwi * ywi) + kK[4] * (rSq + 2 * xwi * xwi);
+ float yc = ywi * Fr + (kK[4] * 2 * xwi * ywi) + kK[3] * (rSq + 2 * ywi * ywi);
// Move back to image space
- float xr = mFx * xc + mS * yc + mCx;
- float yr = mFy * yc + mCy;
+ float xr = mapperInfo->mFx * xc + mapperInfo->mS * yc + mapperInfo->mCx;
+ float yr = mapperInfo->mFy * yc + mapperInfo->mCy;
// Clamp to within pre-correction active array
if (clamp) {
- xr = std::min(mArrayWidth - 1, std::max(0.f, xr));
- yr = std::min(mArrayHeight - 1, std::max(0.f, yr));
+ xr = std::min(mapperInfo->mArrayWidth - 1, std::max(0.f, xr));
+ yr = std::min(mapperInfo->mArrayHeight - 1, std::max(0.f, yr));
}
coordPairs[i] = static_cast<T>(std::round(xr));
coordPairs[i + 1] = static_cast<T>(std::round(yr));
}
-
return OK;
}
template<typename T>
status_t DistortionMapper::mapCorrectedToRawImplSimple(T *coordPairs, int coordCount,
- bool clamp) const {
- if (!mValidMapping) return INVALID_OPERATION;
+ const DistortionMapperInfo *mapperInfo, bool clamp) const {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
- float scaleX = mArrayWidth / mActiveWidth;
- float scaleY = mArrayHeight / mActiveHeight;
+ float scaleX = mapperInfo->mArrayWidth / mapperInfo->mActiveWidth;
+ float scaleY = mapperInfo->mArrayHeight / mapperInfo->mActiveHeight;
for (int i = 0; i < coordCount * 2; i += 2) {
float x = coordPairs[i];
float y = coordPairs[i + 1];
float rawX = x * scaleX;
float rawY = y * scaleY;
if (clamp) {
- rawX = std::min(mArrayWidth - 1, std::max(0.f, rawX));
- rawY = std::min(mArrayHeight - 1, std::max(0.f, rawY));
+ rawX = std::min(mapperInfo->mArrayWidth - 1, std::max(0.f, rawX));
+ rawY = std::min(mapperInfo->mArrayHeight - 1, std::max(0.f, rawY));
}
coordPairs[i] = static_cast<T>(std::round(rawX));
coordPairs[i + 1] = static_cast<T>(std::round(rawY));
@@ -377,9 +433,9 @@
return OK;
}
-status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
- bool simple) const {
- if (!mValidMapping) return INVALID_OPERATION;
+status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple) const {
+ if (!mapperInfo->mValidMapping) return INVALID_OPERATION;
for (int i = 0; i < rectCount * 4; i += 4) {
// Map from (l, t, width, height) to (l, t, r, b)
@@ -390,7 +446,7 @@
rects[i + 1] + rects[i + 3] - 1
};
- mapCorrectedToRaw(coords, 2, clamp, simple);
+ mapCorrectedToRaw(coords, 2, mapperInfo, clamp, simple);
// Map back to (l, t, width, height)
rects[i] = coords[0];
@@ -402,37 +458,37 @@
return OK;
}
-status_t DistortionMapper::buildGrids() {
- if (mCorrectedGrid.size() != kGridSize * kGridSize) {
- mCorrectedGrid.resize(kGridSize * kGridSize);
- mDistortedGrid.resize(kGridSize * kGridSize);
+status_t DistortionMapper::buildGrids(DistortionMapperInfo *mapperInfo) {
+ if (mapperInfo->mCorrectedGrid.size() != kGridSize * kGridSize) {
+ mapperInfo->mCorrectedGrid.resize(kGridSize * kGridSize);
+ mapperInfo->mDistortedGrid.resize(kGridSize * kGridSize);
}
- float gridMargin = mArrayWidth * kGridMargin;
- float gridSpacingX = (mArrayWidth + 2 * gridMargin) / kGridSize;
- float gridSpacingY = (mArrayHeight + 2 * gridMargin) / kGridSize;
+ float gridMargin = mapperInfo->mArrayWidth * kGridMargin;
+ float gridSpacingX = (mapperInfo->mArrayWidth + 2 * gridMargin) / kGridSize;
+ float gridSpacingY = (mapperInfo->mArrayHeight + 2 * gridMargin) / kGridSize;
size_t index = 0;
float x = -gridMargin;
for (size_t i = 0; i < kGridSize; i++, x += gridSpacingX) {
float y = -gridMargin;
for (size_t j = 0; j < kGridSize; j++, y += gridSpacingY, index++) {
- mCorrectedGrid[index].src = nullptr;
- mCorrectedGrid[index].coords = {
+ mapperInfo->mCorrectedGrid[index].src = nullptr;
+ mapperInfo->mCorrectedGrid[index].coords = {
x, y,
x + gridSpacingX, y,
x + gridSpacingX, y + gridSpacingY,
x, y + gridSpacingY
};
- mDistortedGrid[index].src = &mCorrectedGrid[index];
- mDistortedGrid[index].coords = mCorrectedGrid[index].coords;
- status_t res = mapCorrectedToRawImpl(mDistortedGrid[index].coords.data(), 4,
- /*clamp*/false, /*simple*/false);
+ mapperInfo->mDistortedGrid[index].src = &(mapperInfo->mCorrectedGrid[index]);
+ mapperInfo->mDistortedGrid[index].coords = mapperInfo->mCorrectedGrid[index].coords;
+ status_t res = mapCorrectedToRawImpl(mapperInfo->mDistortedGrid[index].coords.data(), 4,
+ mapperInfo, /*clamp*/false, /*simple*/false);
if (res != OK) return res;
}
}
- mValidGrids = true;
+ mapperInfo->mValidGrids = true;
return OK;
}
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
index 5027bd0..96f4fda 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.h
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -37,13 +37,8 @@
DistortionMapper();
DistortionMapper(const DistortionMapper& other) :
- mValidMapping(other.mValidMapping), mValidGrids(other.mValidGrids),
- mFx(other.mFx), mFy(other.mFy), mCx(other.mCx), mCy(other.mCy), mS(other.mS),
- mInvFx(other.mInvFx), mInvFy(other.mInvFy), mK(other.mK),
- mArrayWidth(other.mArrayWidth), mArrayHeight(other.mArrayHeight),
- mActiveWidth(other.mActiveWidth), mActiveHeight(other.mActiveHeight),
- mArrayDiffX(other.mArrayDiffX), mArrayDiffY(other.mArrayDiffY),
- mCorrectedGrid(other.mCorrectedGrid), mDistortedGrid(other.mDistortedGrid) {
+ mDistortionMapperInfo(other.mDistortionMapperInfo),
+ mDistortionMapperInfoMaximumResolution(other.mDistortionMapperInfoMaximumResolution) {
initRemappedKeys(); }
void initRemappedKeys() override;
@@ -75,10 +70,14 @@
public: // Visible for testing. Not guarded by mutex; do not use concurrently
+
+ struct DistortionMapperInfo;
+
/**
* Update lens calibration from capture results or equivalent
*/
- status_t updateCalibration(const CameraMetadata &result);
+ status_t updateCalibration(const CameraMetadata &result, bool isStatic = false,
+ bool maxResolution = false);
/**
* Transform from distorted (original) to corrected (warped) coordinates.
@@ -89,8 +88,8 @@
* clamp: Whether to clamp the result to the bounds of the active array
* simple: Whether to do complex correction or just a simple linear map
*/
- status_t mapRawToCorrected(int32_t *coordPairs, int coordCount, bool clamp,
- bool simple = true);
+ status_t mapRawToCorrected(int32_t *coordPairs, int coordCount,
+ DistortionMapperInfo *mapperInfo, bool clamp, bool simple = true);
/**
* Transform from distorted (original) to corrected (warped) coordinates.
@@ -101,8 +100,8 @@
* clamp: Whether to clamp the result to the bounds of the active array
* simple: Whether to do complex correction or just a simple linear map
*/
- status_t mapRawRectToCorrected(int32_t *rects, int rectCount, bool clamp,
- bool simple = true);
+ status_t mapRawRectToCorrected(int32_t *rects, int rectCount,
+ DistortionMapperInfo *mapperInfo, bool clamp, bool simple = true);
/**
* Transform from corrected (warped) to distorted (original) coordinates.
@@ -113,8 +112,8 @@
* clamp: Whether to clamp the result to the bounds of the precorrection active array
* simple: Whether to do complex correction or just a simple linear map
*/
- status_t mapCorrectedToRaw(int32_t* coordPairs, int coordCount, bool clamp,
- bool simple = true) const;
+ status_t mapCorrectedToRaw(int32_t* coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple = true) const;
/**
* Transform from corrected (warped) to distorted (original) coordinates.
@@ -125,8 +124,8 @@
* clamp: Whether to clamp the result to the bounds of the precorrection active array
* simple: Whether to do complex correction or just a simple linear map
*/
- status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount, bool clamp,
- bool simple = true) const;
+ status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple = true) const;
struct GridQuad {
// Source grid quad, or null
@@ -136,6 +135,28 @@
std::array<float, 8> coords;
};
+ struct DistortionMapperInfo {
+ bool mValidMapping = false;
+ bool mValidGrids = false;
+
+ // intrinsic parameters, in pixels
+ float mFx, mFy, mCx, mCy, mS;
+ // pre-calculated inverses for speed
+ float mInvFx, mInvFy;
+ // radial/tangential distortion parameters
+ std::array<float, 5> mK;
+
+ // pre-correction active array dimensions
+ float mArrayWidth, mArrayHeight;
+ // active array dimensions
+ float mActiveWidth, mActiveHeight;
+ // corner offsets between pre-correction and active arrays
+ float mArrayDiffX, mArrayDiffY;
+
+ std::vector<GridQuad> mCorrectedGrid;
+ std::vector<GridQuad> mDistortedGrid;
+ };
+
// Find which grid quad encloses the point; returns null if none do
static const GridQuad* findEnclosingQuad(
const int32_t pt[2], const std::vector<GridQuad>& grid);
@@ -153,6 +174,11 @@
// if it is false, then an interpolation coordinate for edges E14 and E23 is found.
static float calculateUorV(const int32_t pt[2], const GridQuad& quad, bool calculateU);
+ DistortionMapperInfo *getMapperInfo(bool maxResolution = false) {
+ return maxResolution ? &mDistortionMapperInfoMaximumResolution :
+ &mDistortionMapperInfo;
+ };
+
private:
mutable std::mutex mMutex;
@@ -163,39 +189,28 @@
// Fuzziness for float inequality tests
constexpr static float kFloatFuzz = 1e-4;
+ bool mMaxResolution = false;
+
+ status_t setupStaticInfoLocked(const CameraMetadata &deviceInfo, bool maxResolution);
+
// Single implementation for various mapCorrectedToRaw methods
template<typename T>
- status_t mapCorrectedToRawImpl(T* coordPairs, int coordCount, bool clamp, bool simple) const;
+ status_t mapCorrectedToRawImpl(T* coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp, bool simple) const;
// Simple linear interpolation option
template<typename T>
- status_t mapCorrectedToRawImplSimple(T* coordPairs, int coordCount, bool clamp) const;
+ status_t mapCorrectedToRawImplSimple(T* coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp) const;
- status_t mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount, bool clamp) const;
+ status_t mapRawToCorrectedSimple(int32_t *coordPairs, int coordCount,
+ const DistortionMapperInfo *mapperInfo, bool clamp) const;
// Utility to create reverse mapping grids
- status_t buildGrids();
+ status_t buildGrids(DistortionMapperInfo *mapperInfo);
-
- bool mValidMapping;
- bool mValidGrids;
-
- // intrisic parameters, in pixels
- float mFx, mFy, mCx, mCy, mS;
- // pre-calculated inverses for speed
- float mInvFx, mInvFy;
- // radial/tangential distortion parameters
- std::array<float, 5> mK;
-
- // pre-correction active array dimensions
- float mArrayWidth, mArrayHeight;
- // active array dimensions
- float mActiveWidth, mActiveHeight;
- // corner offsets between pre-correction and active arrays
- float mArrayDiffX, mArrayDiffY;
-
- std::vector<GridQuad> mCorrectedGrid;
- std::vector<GridQuad> mDistortedGrid;
+ DistortionMapperInfo mDistortionMapperInfo;
+ DistortionMapperInfo mDistortionMapperInfoMaximumResolution;
}; // class DistortionMapper
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
index 1bc2081..1a39510 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -20,6 +20,7 @@
#include <algorithm>
#include "device3/ZoomRatioMapper.h"
+#include "utils/SessionConfigurationUtils.h"
namespace android {
@@ -128,43 +129,120 @@
return OK;
}
+static bool getArrayWidthAndHeight(const CameraMetadata *deviceInfo,
+ int32_t arrayTag, int32_t *width, int32_t *height) {
+ if (width == nullptr || height == nullptr) {
+ ALOGE("%s: width / height nullptr", __FUNCTION__);
+ return false;
+ }
+ camera_metadata_ro_entry_t entry;
+ entry = deviceInfo->find(arrayTag);
+ if (entry.count != 4) return false;
+ *width = entry.data.i32[2];
+ *height = entry.data.i32[3];
+ return true;
+}
+
ZoomRatioMapper::ZoomRatioMapper(const CameraMetadata* deviceInfo,
bool supportNativeZoomRatio, bool usePrecorrectArray) {
initRemappedKeys();
- camera_metadata_ro_entry_t entry;
+ int32_t arrayW = 0;
+ int32_t arrayH = 0;
+ int32_t arrayMaximumResolutionW = 0;
+ int32_t arrayMaximumResolutionH = 0;
+ int32_t activeW = 0;
+ int32_t activeH = 0;
+ int32_t activeMaximumResolutionW = 0;
+ int32_t activeMaximumResolutionH = 0;
- entry = deviceInfo->find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
- if (entry.count != 4) return;
- int32_t arrayW = entry.data.i32[2];
- int32_t arrayH = entry.data.i32[3];
+ if (!getArrayWidthAndHeight(deviceInfo, ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+ &arrayW, &arrayH)) {
+ ALOGE("%s: Couldn't get pre correction active array size", __FUNCTION__);
+ return;
+ }
+ if (!getArrayWidthAndHeight(deviceInfo, ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ &activeW, &activeH)) {
+ ALOGE("%s: Couldn't get active array size", __FUNCTION__);
+ return;
+ }
- entry = deviceInfo->find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
- if (entry.count != 4) return;
- int32_t activeW = entry.data.i32[2];
- int32_t activeH = entry.data.i32[3];
+ bool isUltraHighResolutionSensor =
+ camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(*deviceInfo);
+ if (isUltraHighResolutionSensor) {
+ if (!getArrayWidthAndHeight(deviceInfo,
+ ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
+ &arrayMaximumResolutionW, &arrayMaximumResolutionH)) {
+ ALOGE("%s: Couldn't get maximum resolution pre correction active array size",
+ __FUNCTION__);
+ return;
+ }
+ if (!getArrayWidthAndHeight(deviceInfo,
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
+ &activeMaximumResolutionW, &activeMaximumResolutionH)) {
+ ALOGE("%s: Couldn't get maximum resolution active array size",
+ __FUNCTION__);
+ return;
+ }
+ }
if (usePrecorrectArray) {
mArrayWidth = arrayW;
mArrayHeight = arrayH;
+ mArrayWidthMaximumResolution = arrayMaximumResolutionW;
+ mArrayHeightMaximumResolution = arrayMaximumResolutionH;
} else {
mArrayWidth = activeW;
mArrayHeight = activeH;
+ mArrayWidthMaximumResolution = activeMaximumResolutionW;
+ mArrayHeightMaximumResolution = activeMaximumResolutionH;
}
mHalSupportsZoomRatio = supportNativeZoomRatio;
- ALOGV("%s: array size: %d x %d, mHalSupportsZoomRatio %d",
- __FUNCTION__, mArrayWidth, mArrayHeight, mHalSupportsZoomRatio);
+ ALOGV("%s: array size: %d x %d, full res array size: %d x %d, mHalSupportsZoomRatio %d",
+ __FUNCTION__, mArrayWidth, mArrayHeight, mArrayWidthMaximumResolution,
+ mArrayHeightMaximumResolution, mHalSupportsZoomRatio);
mIsValid = true;
}
+status_t ZoomRatioMapper::getArrayDimensionsToBeUsed(const CameraMetadata *settings,
+ int32_t *arrayWidth, int32_t *arrayHeight) {
+ if (settings == nullptr || arrayWidth == nullptr || arrayHeight == nullptr) {
+ return BAD_VALUE;
+ }
+ // First we get the sensorPixelMode from the settings metadata.
+ int32_t sensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+ camera_metadata_ro_entry sensorPixelModeEntry = settings->find(ANDROID_SENSOR_PIXEL_MODE);
+ if (sensorPixelModeEntry.count != 0) {
+ sensorPixelMode = sensorPixelModeEntry.data.u8[0];
+ if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
+ sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
+ ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
+ __FUNCTION__, sensorPixelMode);
+ return BAD_VALUE;
+ }
+ }
+ if (sensorPixelMode == ANDROID_SENSOR_PIXEL_MODE_DEFAULT) {
+ *arrayWidth = mArrayWidth;
+ *arrayHeight = mArrayHeight;
+ } else {
+ *arrayWidth = mArrayWidthMaximumResolution;
+ *arrayHeight = mArrayHeightMaximumResolution;
+ }
+ return OK;
+}
+
status_t ZoomRatioMapper::updateCaptureRequest(CameraMetadata* request) {
if (!mIsValid) return INVALID_OPERATION;
status_t res = OK;
bool zoomRatioIs1 = true;
camera_metadata_entry_t entry;
-
+ int arrayHeight = 0, arrayWidth = 0;
+ res = getArrayDimensionsToBeUsed(request, &arrayWidth, &arrayHeight);
+ if (res != OK) {
+ return res;
+ }
entry = request->find(ANDROID_CONTROL_ZOOM_RATIO);
if (entry.count == 1 && entry.data.f[0] != 1.0f) {
zoomRatioIs1 = false;
@@ -174,19 +252,19 @@
if (cropRegionEntry.count == 4) {
int cropWidth = cropRegionEntry.data.i32[2];
int cropHeight = cropRegionEntry.data.i32[3];
- if (cropWidth < mArrayWidth && cropHeight < mArrayHeight) {
+ if (cropWidth < arrayWidth && cropHeight < arrayHeight) {
cropRegionEntry.data.i32[0] = 0;
cropRegionEntry.data.i32[1] = 0;
- cropRegionEntry.data.i32[2] = mArrayWidth;
- cropRegionEntry.data.i32[3] = mArrayHeight;
+ cropRegionEntry.data.i32[2] = arrayWidth;
+ cropRegionEntry.data.i32[3] = arrayHeight;
}
}
}
if (mHalSupportsZoomRatio && zoomRatioIs1) {
- res = separateZoomFromCropLocked(request, false/*isResult*/);
+ res = separateZoomFromCropLocked(request, false/*isResult*/, arrayWidth, arrayHeight);
} else if (!mHalSupportsZoomRatio && !zoomRatioIs1) {
- res = combineZoomAndCropLocked(request, false/*isResult*/);
+ res = combineZoomAndCropLocked(request, false/*isResult*/, arrayWidth, arrayHeight);
}
// If CONTROL_ZOOM_RATIO is in request, but HAL doesn't support
@@ -203,10 +281,15 @@
status_t res = OK;
+ int arrayHeight = 0, arrayWidth = 0;
+ res = getArrayDimensionsToBeUsed(result, &arrayWidth, &arrayHeight);
+ if (res != OK) {
+ return res;
+ }
if (mHalSupportsZoomRatio && requestedZoomRatioIs1) {
- res = combineZoomAndCropLocked(result, true/*isResult*/);
+ res = combineZoomAndCropLocked(result, true/*isResult*/, arrayWidth, arrayHeight);
} else if (!mHalSupportsZoomRatio && !requestedZoomRatioIs1) {
- res = separateZoomFromCropLocked(result, true/*isResult*/);
+ res = separateZoomFromCropLocked(result, true/*isResult*/, arrayWidth, arrayHeight);
} else {
camera_metadata_entry_t entry = result->find(ANDROID_CONTROL_ZOOM_RATIO);
if (entry.count == 0) {
@@ -218,16 +301,22 @@
return res;
}
-float ZoomRatioMapper::deriveZoomRatio(const CameraMetadata* metadata) {
+status_t ZoomRatioMapper::deriveZoomRatio(const CameraMetadata* metadata, float *zoomRatioRet,
+ int arrayWidth, int arrayHeight) {
+ if (metadata == nullptr || zoomRatioRet == nullptr) {
+ return BAD_VALUE;
+ }
float zoomRatio = 1.0;
camera_metadata_ro_entry_t entry;
entry = metadata->find(ANDROID_SCALER_CROP_REGION);
- if (entry.count != 4) return zoomRatio;
-
+ if (entry.count != 4) {
+ *zoomRatioRet = 1;
+ return OK;
+ }
// Center of the preCorrection/active size
- float arrayCenterX = mArrayWidth / 2.0;
- float arrayCenterY = mArrayHeight / 2.0;
+ float arrayCenterX = arrayWidth / 2.0;
+ float arrayCenterY = arrayHeight / 2.0;
// Re-map crop region to coordinate system centered to (arrayCenterX,
// arrayCenterY).
@@ -237,22 +326,30 @@
float cropRegionBottom = entry.data.i32[1] + entry.data.i32[3] - arrayCenterY;
// Calculate the scaling factor for left, top, bottom, right
- float zoomRatioLeft = std::max(mArrayWidth / (2 * cropRegionLeft), 1.0f);
- float zoomRatioTop = std::max(mArrayHeight / (2 * cropRegionTop), 1.0f);
- float zoomRatioRight = std::max(mArrayWidth / (2 * cropRegionRight), 1.0f);
- float zoomRatioBottom = std::max(mArrayHeight / (2 * cropRegionBottom), 1.0f);
+ float zoomRatioLeft = std::max(arrayWidth / (2 * cropRegionLeft), 1.0f);
+ float zoomRatioTop = std::max(arrayHeight / (2 * cropRegionTop), 1.0f);
+ float zoomRatioRight = std::max(arrayWidth / (2 * cropRegionRight), 1.0f);
+ float zoomRatioBottom = std::max(arrayHeight / (2 * cropRegionBottom), 1.0f);
// Use minimum scaling factor to handle letterboxing or pillarboxing
zoomRatio = std::min(std::min(zoomRatioLeft, zoomRatioRight),
std::min(zoomRatioTop, zoomRatioBottom));
ALOGV("%s: derived zoomRatio is %f", __FUNCTION__, zoomRatio);
- return zoomRatio;
+ *zoomRatioRet = zoomRatio;
+ return OK;
}
-status_t ZoomRatioMapper::separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult) {
- status_t res;
- float zoomRatio = deriveZoomRatio(metadata);
+status_t ZoomRatioMapper::separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult,
+ int arrayWidth, int arrayHeight) {
+ float zoomRatio = 1.0;
+ status_t res = deriveZoomRatio(metadata, &zoomRatio, arrayWidth, arrayHeight);
+
+ if (res != OK) {
+ ALOGE("%s: Failed to derive zoom ratio: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
// Update zoomRatio metadata tag
res = metadata->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
@@ -272,12 +369,14 @@
continue;
}
// Top left (inclusive)
- scaleCoordinates(entry.data.i32 + j, 1, zoomRatio, true /*clamp*/);
+ scaleCoordinates(entry.data.i32 + j, 1, zoomRatio, true /*clamp*/, arrayWidth,
+ arrayHeight);
// Bottom right (exclusive): Use adjacent inclusive pixel to
// calculate.
entry.data.i32[j+2] -= 1;
entry.data.i32[j+3] -= 1;
- scaleCoordinates(entry.data.i32 + j + 2, 1, zoomRatio, true /*clamp*/);
+ scaleCoordinates(entry.data.i32 + j + 2, 1, zoomRatio, true /*clamp*/, arrayWidth,
+ arrayHeight);
entry.data.i32[j+2] += 1;
entry.data.i32[j+3] += 1;
}
@@ -285,20 +384,22 @@
for (auto rect : kRectsToCorrect) {
entry = metadata->find(rect);
- scaleRects(entry.data.i32, entry.count / 4, zoomRatio);
+ scaleRects(entry.data.i32, entry.count / 4, zoomRatio, arrayWidth, arrayHeight);
}
if (isResult) {
for (auto pts : kResultPointsToCorrectNoClamp) {
entry = metadata->find(pts);
- scaleCoordinates(entry.data.i32, entry.count / 2, zoomRatio, false /*clamp*/);
+ scaleCoordinates(entry.data.i32, entry.count / 2, zoomRatio, false /*clamp*/,
+ arrayWidth, arrayHeight);
}
}
return OK;
}
-status_t ZoomRatioMapper::combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult) {
+status_t ZoomRatioMapper::combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult,
+ int arrayWidth, int arrayHeight) {
float zoomRatio = 1.0f;
camera_metadata_entry_t entry;
entry = metadata->find(ANDROID_CONTROL_ZOOM_RATIO);
@@ -307,7 +408,6 @@
}
// Unscale regions with zoomRatio
- status_t res;
for (auto region : kMeteringRegionsToCorrect) {
entry = metadata->find(region);
for (size_t j = 0; j < entry.count; j += 5) {
@@ -316,29 +416,32 @@
continue;
}
// Top-left (inclusive)
- scaleCoordinates(entry.data.i32 + j, 1, 1.0 / zoomRatio, true /*clamp*/);
+ scaleCoordinates(entry.data.i32 + j, 1, 1.0 / zoomRatio, true /*clamp*/, arrayWidth,
+ arrayHeight);
// Bottom-right (exclusive): Use adjacent inclusive pixel to
// calculate.
entry.data.i32[j+2] -= 1;
entry.data.i32[j+3] -= 1;
- scaleCoordinates(entry.data.i32 + j + 2, 1, 1.0 / zoomRatio, true /*clamp*/);
+ scaleCoordinates(entry.data.i32 + j + 2, 1, 1.0 / zoomRatio, true /*clamp*/, arrayWidth,
+ arrayHeight);
entry.data.i32[j+2] += 1;
entry.data.i32[j+3] += 1;
}
}
for (auto rect : kRectsToCorrect) {
entry = metadata->find(rect);
- scaleRects(entry.data.i32, entry.count / 4, 1.0 / zoomRatio);
+ scaleRects(entry.data.i32, entry.count / 4, 1.0 / zoomRatio, arrayWidth, arrayHeight);
}
if (isResult) {
for (auto pts : kResultPointsToCorrectNoClamp) {
entry = metadata->find(pts);
- scaleCoordinates(entry.data.i32, entry.count / 2, 1.0 / zoomRatio, false /*clamp*/);
+ scaleCoordinates(entry.data.i32, entry.count / 2, 1.0 / zoomRatio, false /*clamp*/,
+ arrayWidth, arrayHeight);
}
}
zoomRatio = 1.0;
- res = metadata->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
+ status_t res = metadata->update(ANDROID_CONTROL_ZOOM_RATIO, &zoomRatio, 1);
if (res != OK) {
return res;
}
@@ -347,7 +450,7 @@
}
void ZoomRatioMapper::scaleCoordinates(int32_t* coordPairs, int coordCount,
- float scaleRatio, bool clamp) {
+ float scaleRatio, bool clamp, int32_t arrayWidth, int32_t arrayHeight) {
// A pixel's coordinate is represented by the position of its top-left corner.
// To avoid the rounding error, we use the coordinate for the center of the
// pixel instead:
@@ -360,18 +463,18 @@
for (int i = 0; i < coordCount * 2; i += 2) {
float x = coordPairs[i];
float y = coordPairs[i + 1];
- float xCentered = x - (mArrayWidth - 2) / 2;
- float yCentered = y - (mArrayHeight - 2) / 2;
+ float xCentered = x - (arrayWidth - 2) / 2;
+ float yCentered = y - (arrayHeight - 2) / 2;
float scaledX = xCentered * scaleRatio;
float scaledY = yCentered * scaleRatio;
- scaledX += (mArrayWidth - 2) / 2;
- scaledY += (mArrayHeight - 2) / 2;
+ scaledX += (arrayWidth - 2) / 2;
+ scaledY += (arrayHeight - 2) / 2;
coordPairs[i] = static_cast<int32_t>(std::round(scaledX));
coordPairs[i+1] = static_cast<int32_t>(std::round(scaledY));
// Clamp to within activeArray/preCorrectionActiveArray
if (clamp) {
- int32_t right = mArrayWidth - 1;
- int32_t bottom = mArrayHeight - 1;
+ int32_t right = arrayWidth - 1;
+ int32_t bottom = arrayHeight - 1;
coordPairs[i] =
std::min(right, std::max(0, coordPairs[i]));
coordPairs[i+1] =
@@ -382,7 +485,7 @@
}
void ZoomRatioMapper::scaleRects(int32_t* rects, int rectCount,
- float scaleRatio) {
+ float scaleRatio, int32_t arrayWidth, int32_t arrayHeight) {
for (int i = 0; i < rectCount * 4; i += 4) {
// Map from (l, t, width, height) to (l, t, l+width-1, t+height-1),
// where both top-left and bottom-right are inclusive.
@@ -394,9 +497,9 @@
};
// top-left
- scaleCoordinates(coords, 1, scaleRatio, true /*clamp*/);
+ scaleCoordinates(coords, 1, scaleRatio, true /*clamp*/, arrayWidth, arrayHeight);
// bottom-right
- scaleCoordinates(coords+2, 1, scaleRatio, true /*clamp*/);
+ scaleCoordinates(coords+2, 1, scaleRatio, true /*clamp*/, arrayWidth, arrayHeight);
// Map back to (l, t, width, height)
rects[i] = coords[0];
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.h b/services/camera/libcameraservice/device3/ZoomRatioMapper.h
index 3769299..b7a9e41 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.h
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.h
@@ -68,22 +68,31 @@
public: // Visible for testing. Do not use concurently.
void scaleCoordinates(int32_t* coordPairs, int coordCount,
- float scaleRatio, bool clamp);
+ float scaleRatio, bool clamp, int32_t arrayWidth, int32_t arrayHeight);
bool isValid() { return mIsValid; }
private:
// const after construction
bool mHalSupportsZoomRatio;
- // active array / pre-correction array dimension
+
+ // active array / pre-correction array dimension for default and maximum
+ // resolution modes.
int32_t mArrayWidth, mArrayHeight;
+ int32_t mArrayWidthMaximumResolution, mArrayHeightMaximumResolution;
bool mIsValid = false;
- float deriveZoomRatio(const CameraMetadata* metadata);
- void scaleRects(int32_t* rects, int rectCount, float scaleRatio);
+ status_t deriveZoomRatio(const CameraMetadata* metadata, float *zoomRatio, int arrayWidth,
+ int arrayHeight);
+ void scaleRects(int32_t* rects, int rectCount, float scaleRatio, int32_t arrayWidth,
+ int32_t arrayHeight);
- status_t separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult);
- status_t combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult);
+ status_t separateZoomFromCropLocked(CameraMetadata* metadata, bool isResult, int arrayWidth,
+ int arrayHeight);
+ status_t combineZoomAndCropLocked(CameraMetadata* metadata, bool isResult, int arrayWidth,
+ int arrayHeight);
+ status_t getArrayDimensionsToBeUsed(const CameraMetadata *settings, int32_t *arrayWidth,
+ int32_t *arrayHeight);
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/fuzzer/DistortionMapperFuzzer.cpp b/services/camera/libcameraservice/fuzzer/DistortionMapperFuzzer.cpp
index 96bab4e..88ec85c 100644
--- a/services/camera/libcameraservice/fuzzer/DistortionMapperFuzzer.cpp
+++ b/services/camera/libcameraservice/fuzzer/DistortionMapperFuzzer.cpp
@@ -23,6 +23,7 @@
using namespace android;
using namespace android::camera3;
+using DistortionMapperInfo = android::camera3::DistortionMapper::DistortionMapperInfo;
int32_t testActiveArray[] = {100, 100, 1000, 750};
float testICal[] = { 1000.f, 1000.f, 500.f, 500.f, 0.f };
@@ -62,10 +63,10 @@
for (int index = 0; fdp.remaining_bytes() > 0; index++) {
input.push_back(fdp.ConsumeIntegral<int32_t>());
}
-
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
// The size argument counts how many coordinate pairs there are, so
// it is expected to be 1/2 the size of the input.
- m.mapCorrectedToRaw(input.data(), input.size()/2, clamp, simple);
+ m.mapCorrectedToRaw(input.data(), input.size()/2, mapperInfo, clamp, simple);
return 0;
}
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
index 54935c9..8331136 100644
--- a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -27,7 +27,7 @@
using namespace android;
using namespace android::camera3;
-
+using DistortionMapperInfo = android::camera3::DistortionMapper::DistortionMapperInfo;
int32_t testActiveArray[] = {100, 100, 1000, 750};
int32_t testPreCorrActiveArray[] = {90, 90, 1020, 770};
@@ -132,14 +132,15 @@
/*preCorrectionActiveArray*/ testActiveArray);
auto coords = basicCoords;
- res = m.mapCorrectedToRaw(coords.data(), 5, /*clamp*/true);
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
+ res = m.mapCorrectedToRaw(coords.data(), 5, mapperInfo, /*clamp*/true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_EQ(coords[i], basicCoords[i]);
}
- res = m.mapRawToCorrected(coords.data(), 5, /*clamp*/true);
+ res = m.mapRawToCorrected(coords.data(), 5, mapperInfo, /*clamp*/true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < coords.size(); i++) {
@@ -152,14 +153,14 @@
};
auto rectsOrig = rects;
- res = m.mapCorrectedRectToRaw(rects.data(), 2, /*clamp*/true);
+ res = m.mapCorrectedRectToRaw(rects.data(), 2, mapperInfo, /*clamp*/true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < rects.size(); i++) {
EXPECT_EQ(rects[i], rectsOrig[i]);
}
- res = m.mapRawRectToCorrected(rects.data(), 2, /*clamp*/true);
+ res = m.mapRawRectToCorrected(rects.data(), 2, mapperInfo, /*clamp*/true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < rects.size(); i++) {
@@ -176,14 +177,17 @@
/*preCorrectionActiveArray*/ activeArray.data());
auto rectsOrig = activeArray;
- res = m.mapCorrectedRectToRaw(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
+ res = m.mapCorrectedRectToRaw(activeArray.data(), 1, mapperInfo, /*clamp*/true,
+ /*simple*/ true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < activeArray.size(); i++) {
EXPECT_EQ(activeArray[i], rectsOrig[i]);
}
- res = m.mapRawRectToCorrected(activeArray.data(), 1, /*clamp*/true, /*simple*/ true);
+ res = m.mapRawRectToCorrected(activeArray.data(), 1, mapperInfo, /*clamp*/true,
+ /*simple*/ true);
ASSERT_EQ(res, OK);
for (size_t i = 0; i < activeArray.size(); i++) {
@@ -200,7 +204,8 @@
/*preCorrectionActiveArray*/ testPreCorrActiveArray);
auto coords = basicCoords;
- res = m.mapCorrectedToRaw(coords.data(), 5, /*clamp*/true, /*simple*/true);
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
+ res = m.mapCorrectedToRaw(coords.data(), 5, mapperInfo, /*clamp*/true, /*simple*/true);
ASSERT_EQ(res, OK);
ASSERT_EQ(coords[0], 0); ASSERT_EQ(coords[1], 0);
@@ -237,12 +242,13 @@
auto origCoords = randCoords;
base::Timer correctedToRawTimer;
- res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2, clamp, simple);
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
+ res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2, mapperInfo, clamp, simple);
auto correctedToRawDurationMs = correctedToRawTimer.duration();
EXPECT_EQ(res, OK);
base::Timer rawToCorrectedTimer;
- res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2, clamp, simple);
+ res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2, mapperInfo, clamp, simple);
auto rawToCorrectedDurationMs = rawToCorrectedTimer.duration();
EXPECT_EQ(res, OK);
@@ -363,7 +369,8 @@
using namespace openCvData;
- res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2, /*clamp*/false,
+ DistortionMapperInfo *mapperInfo = m.getMapperInfo();
+ res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2, mapperInfo, /*clamp*/false,
/*simple*/false);
for (size_t i = 0; i < rawCoords.size(); i+=2) {
diff --git a/services/camera/libcameraservice/tests/ZoomRatioTest.cpp b/services/camera/libcameraservice/tests/ZoomRatioTest.cpp
index 4e94991..ff7aafd 100644
--- a/services/camera/libcameraservice/tests/ZoomRatioTest.cpp
+++ b/services/camera/libcameraservice/tests/ZoomRatioTest.cpp
@@ -182,7 +182,7 @@
// Verify 1.0x zoom doesn't change the coordinates
auto coords = originalCoords;
- mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f, false /*clamp*/);
+ mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f, false /*clamp*/, width, height);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_EQ(coords[i], originalCoords[i]);
}
@@ -199,7 +199,7 @@
(width - 1) * 5.0f / 4.0f, (height - 1) / 2.0f, // middle-right after 1.33x zoom
};
coords = originalCoords;
- mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, false /*clamp*/);
+ mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, false /*clamp*/, width, height);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_LE(std::abs(coords[i] - expected2xCoords[i]), kMaxAllowedPixelError);
}
@@ -216,7 +216,7 @@
width - 1.0f, (height - 1) / 2.0f, // middle-right after 1.33x zoom
};
coords = originalCoords;
- mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, true /*clamp*/);
+ mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, true /*clamp*/, width, height);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_LE(std::abs(coords[i] - expected2xCoordsClampedInc[i]), kMaxAllowedPixelError);
}
@@ -233,7 +233,7 @@
width - 1.0f, height / 2.0f, // middle-right after 1.33x zoom
};
coords = originalCoords;
- mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, true /*clamp*/);
+ mapper.scaleCoordinates(coords.data(), coords.size()/2, 2.0f, true /*clamp*/, width, height);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_LE(std::abs(coords[i] - expected2xCoordsClampedExc[i]), kMaxAllowedPixelError);
}
@@ -250,7 +250,7 @@
(width - 1) * 5 / 8.0f, (height - 1) / 2.0f, // middle-right after 1.33x zoom-in
};
coords = originalCoords;
- mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f/3, false /*clamp*/);
+ mapper.scaleCoordinates(coords.data(), coords.size()/2, 1.0f/3, false /*clamp*/, width, height);
for (size_t i = 0; i < coords.size(); i++) {
EXPECT_LE(std::abs(coords[i] - expectedZoomOutCoords[i]), kMaxAllowedPixelError);
}
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
index 8a0303a..485705c 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.cpp
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -916,11 +916,25 @@
ALOGV("%s: Cannot find focal length in metadata.", __FUNCTION__);
}
+ int32_t sensorPixelMode = ANDROID_SENSOR_PIXEL_MODE_DEFAULT;
+ camera_metadata_ro_entry sensorPixelModeEntry = metadata.find(ANDROID_SENSOR_PIXEL_MODE);
+ if (sensorPixelModeEntry.count != 0) {
+ sensorPixelMode = sensorPixelModeEntry.data.u8[0];
+ // Reject only when the mode is neither valid value; '||' here was a
+ // tautology that failed every request carrying ANDROID_SENSOR_PIXEL_MODE.
+ if (sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_DEFAULT &&
+ sensorPixelMode != ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) {
+ ALOGE("%s: Request sensor pixel mode is not one of the valid values %d",
+ __FUNCTION__, sensorPixelMode);
+ return false;
+ }
+ }
+ int32_t activeArrayTag = sensorPixelMode == ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION ?
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION :
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE;
if (metadata.exists(ANDROID_SCALER_CROP_REGION) &&
- staticInfo.exists(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE)) {
+ staticInfo.exists(activeArrayTag)) {
entry = metadata.find(ANDROID_SCALER_CROP_REGION);
camera_metadata_ro_entry activeArrayEntry =
- staticInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+ staticInfo.find(activeArrayTag);
if (!setDigitalZoomRatio(entry.data.i32[2], entry.data.i32[3],
activeArrayEntry.data.i32[2], activeArrayEntry.data.i32[3])) {
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 8f42a85..6dcf440 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -21,22 +21,115 @@
#include "device3/Camera3Device.h"
#include "device3/Camera3OutputStream.h"
-// Convenience methods for constructing binder::Status objects for error returns
-
-#define STATUS_ERROR(errorCode, errorString) \
- binder::Status::fromServiceSpecificError(errorCode, \
- String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
-
-#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
- binder::Status::fromServiceSpecificError(errorCode, \
- String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
- __VA_ARGS__))
-
using android::camera3::OutputStreamInfo;
using android::camera3::OutputStreamInfo;
using android::hardware::camera2::ICameraDeviceUser;
+using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
namespace android {
+namespace camera3 {
+
+void StreamConfiguration::getStreamConfigurations(
+ const CameraMetadata &staticInfo, int configuration,
+ std::unordered_map<int, std::vector<StreamConfiguration>> *scm) {
+ if (scm == nullptr) {
+ ALOGE("%s: StreamConfigurationMap nullptr", __FUNCTION__);
+ return;
+ }
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_IS_INPUT_OFFSET = 3;
+
+ camera_metadata_ro_entry availableStreamConfigs = staticInfo.find(configuration);
+ for (size_t i = 0; i < availableStreamConfigs.count; i += 4) {
+ int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ StreamConfiguration sc = {format, width, height, isInput};
+ (*scm)[format].push_back(sc);
+ }
+}
+
+void StreamConfiguration::getStreamConfigurations(
+ const CameraMetadata &staticInfo, bool maxRes,
+ std::unordered_map<int, std::vector<StreamConfiguration>> *scm) {
+ int32_t scalerKey =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxRes);
+
+ int32_t depthKey =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, maxRes);
+
+ // maxRes must be forwarded for all stream kinds; dropping it here made
+ // dynamic-depth and HEIC always resolve to the default-resolution tags.
+ int32_t dynamicDepthKey =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS, maxRes);
+
+ int32_t heicKey =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, maxRes);
+
+ getStreamConfigurations(staticInfo, scalerKey, scm);
+ getStreamConfigurations(staticInfo, depthKey, scm);
+ getStreamConfigurations(staticInfo, dynamicDepthKey, scm);
+ getStreamConfigurations(staticInfo, heicKey, scm);
+}
+
+int32_t SessionConfigurationUtils::getAppropriateModeTag(int32_t defaultTag, bool maxResolution) {
+ if (!maxResolution) {
+ return defaultTag;
+ }
+ switch (defaultTag) {
+ case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS:
+ return ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS:
+ return ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS:
+ return ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
+ return ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS:
+ return ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
+ return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
+ return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
+ return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
+ case ANDROID_LENS_INTRINSIC_CALIBRATION:
+ return ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION;
+ case ANDROID_LENS_DISTORTION:
+ return ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION;
+ default:
+ ALOGE("%s: Tag %d doesn't have a maximum resolution counterpart", __FUNCTION__,
+ defaultTag);
+ return -1;
+ }
+ return -1;
+}
+
+
+StreamConfigurationPair
+SessionConfigurationUtils::getStreamConfigurationPair(const CameraMetadata &staticInfo) {
+ camera3::StreamConfigurationPair streamConfigurationPair;
+ camera3::StreamConfiguration::getStreamConfigurations(staticInfo, false,
+ &streamConfigurationPair.mDefaultStreamConfigurationMap);
+ camera3::StreamConfiguration::getStreamConfigurations(staticInfo, true,
+ &streamConfigurationPair.mMaximumResolutionStreamConfigurationMap);
+ return streamConfigurationPair;
+}
int64_t SessionConfigurationUtils::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
int64_t d0 = x0 - x1;
@@ -45,15 +138,22 @@
}
bool SessionConfigurationUtils::roundBufferDimensionNearest(int32_t width, int32_t height,
- int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
- /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
+ int32_t format, android_dataspace dataSpace,
+ const CameraMetadata& info, bool maxResolution, /*out*/int32_t* outWidth,
+ /*out*/int32_t* outHeight) {
+ const int32_t depthSizesTag =
+ getAppropriateModeTag(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
+ maxResolution);
+ const int32_t scalerSizesTag =
+ getAppropriateModeTag(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t heicSizesTag =
+ getAppropriateModeTag(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, maxResolution);
camera_metadata_ro_entry streamConfigs =
- (dataSpace == HAL_DATASPACE_DEPTH) ?
- info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
+ (dataSpace == HAL_DATASPACE_DEPTH) ? info.find(depthSizesTag) :
(dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
- info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
- info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ info.find(heicSizesTag) :
+ info.find(scalerSizesTag);
int32_t bestWidth = -1;
int32_t bestHeight = -1;
@@ -128,11 +228,11 @@
binder::Status SessionConfigurationUtils::createSurfaceFromGbp(
OutputStreamInfo& streamInfo, bool isStreamInfoValid,
sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
- const String8 &cameraId, const CameraMetadata &physicalCameraMetadata) {
-
+ const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
+ const std::vector<int32_t> &sensorPixelModesUsed){
// bufferProducer must be non-null
if (gbp == nullptr) {
- String8 msg = String8::format("Camera %s: Surface is NULL", cameraId.string());
+ String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -144,13 +244,13 @@
status_t err;
if ((err = gbp->getConsumerUsage(&consumerUsage)) != OK) {
String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
- cameraId.string(), strerror(-err), err);
+ logicalCameraId.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
ALOGW("%s: Camera %s with consumer usage flag: %" PRIu64 ": Forcing asynchronous mode for"
- "stream", __FUNCTION__, cameraId.string(), consumerUsage);
+ "stream", __FUNCTION__, logicalCameraId.string(), consumerUsage);
useAsync = true;
}
@@ -169,26 +269,26 @@
android_dataspace dataSpace;
if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
- cameraId.string(), strerror(-err), err);
+ logicalCameraId.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
- cameraId.string(), strerror(-err), err);
+ logicalCameraId.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
- cameraId.string(), strerror(-err), err);
+ logicalCameraId.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
reinterpret_cast<int*>(&dataSpace))) != OK) {
String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
- cameraId.string(), strerror(-err), err);
+ logicalCameraId.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
@@ -199,16 +299,31 @@
((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
- __FUNCTION__, cameraId.string(), format);
+ __FUNCTION__, logicalCameraId.string(), format);
format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
}
+ std::unordered_set<int32_t> overriddenSensorPixelModes;
+ if (checkAndOverrideSensorPixelModesUsed(sensorPixelModesUsed, format, width, height,
+ physicalCameraMetadata, flexibleConsumer, &overriddenSensorPixelModes) != OK) {
+ String8 msg = String8::format("Camera %s: sensor pixel modes for stream with "
+ "format %#x are not valid",logicalCameraId.string(), format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ bool foundInMaxRes = false;
+ if (overriddenSensorPixelModes.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
+ overriddenSensorPixelModes.end()) {
+        // MAXIMUM_RESOLUTION was chosen for this stream, so size rounding below must
+ foundInMaxRes = true;
+ }
// Round dimensions to the nearest dimensions available for this format
if (flexibleConsumer && isPublicFormat(format) &&
!SessionConfigurationUtils::roundBufferDimensionNearest(width, height,
- format, dataSpace, physicalCameraMetadata, /*out*/&width, /*out*/&height)) {
+ format, dataSpace, physicalCameraMetadata, foundInMaxRes, /*out*/&width,
+ /*out*/&height)) {
String8 msg = String8::format("Camera %s: No supported stream configurations with "
"format %#x defined, failed to create output stream",
- cameraId.string(), format);
+ logicalCameraId.string(), format);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -219,30 +334,31 @@
streamInfo.format = format;
streamInfo.dataSpace = dataSpace;
streamInfo.consumerUsage = consumerUsage;
+ streamInfo.sensorPixelModesUsed = overriddenSensorPixelModes;
return binder::Status::ok();
}
if (width != streamInfo.width) {
String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
- cameraId.string(), width, streamInfo.width);
+ logicalCameraId.string(), width, streamInfo.width);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (height != streamInfo.height) {
String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
- cameraId.string(), height, streamInfo.height);
+ logicalCameraId.string(), height, streamInfo.height);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (format != streamInfo.format) {
String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
- cameraId.string(), format, streamInfo.format);
+ logicalCameraId.string(), format, streamInfo.format);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
if (dataSpace != streamInfo.dataSpace) {
String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
- cameraId.string(), dataSpace, streamInfo.dataSpace);
+ logicalCameraId.string(), dataSpace, streamInfo.dataSpace);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -251,7 +367,7 @@
if (consumerUsage != streamInfo.consumerUsage) {
String8 msg = String8::format(
"Camera %s:Surface usage flag doesn't match %" PRIu64 " vs %" PRIu64 "",
- cameraId.string(), consumerUsage, streamInfo.consumerUsage);
+ logicalCameraId.string(), consumerUsage, streamInfo.consumerUsage);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -259,7 +375,6 @@
return binder::Status::ok();
}
-
void SessionConfigurationUtils::mapStreamInfo(const OutputStreamInfo &streamInfo,
camera3::camera_stream_rotation_t rotation, String8 physicalId,
int32_t groupId, hardware::camera::device::V3_7::Stream *stream /*out*/) {
@@ -280,6 +395,12 @@
stream->v3_4.physicalCameraId = std::string(physicalId.string());
stream->v3_4.bufferSize = 0;
stream->groupId = groupId;
+ stream->sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
+ size_t idx = 0;
+ for (auto mode : streamInfo.sensorPixelModesUsed) {
+ stream->sensorPixelModesUsed[idx++] =
+ static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
+ }
}
binder::Status SessionConfigurationUtils::checkPhysicalCameraId(
@@ -394,6 +515,11 @@
streamConfiguration.streams.resize(streamCount);
size_t streamIdx = 0;
if (isInputValid) {
+ hardware::hidl_vec<CameraMetadataEnumAndroidSensorPixelMode> defaultSensorPixelModes;
+ defaultSensorPixelModes.resize(1);
+ defaultSensorPixelModes[0] =
+ static_cast<CameraMetadataEnumAndroidSensorPixelMode>(
+ ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
streamConfiguration.streams[streamIdx++] = {{{/*streamId*/0,
hardware::camera::device::V3_2::StreamType::INPUT,
static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
@@ -401,7 +527,7 @@
Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
/*usage*/ 0, HAL_DATASPACE_UNKNOWN,
hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
- /*physicalId*/ nullptr, /*bufferSize*/0}, /*groupId*/-1};
+ /*physicalId*/ nullptr, /*bufferSize*/0}, /*groupId*/-1, defaultSensorPixelModes};
streamConfiguration.multiResolutionInputImage =
sessionConfiguration.inputIsMultiResolution();
}
@@ -411,6 +537,12 @@
it.getGraphicBufferProducers();
bool deferredConsumer = it.isDeferred();
String8 physicalCameraId = String8(it.getPhysicalCameraId());
+
+ std::vector<int32_t> sensorPixelModesUsed = it.getSensorPixelModesUsed();
+ const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
+ const CameraMetadata &metadataChosen =
+ physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo;
+
size_t numBufferProducers = bufferProducers.size();
bool isStreamInfoValid = false;
int32_t groupId = it.isMultiResolution() ? it.getSurfaceSetID() : -1;
@@ -436,6 +568,15 @@
if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
}
+ if (checkAndOverrideSensorPixelModesUsed(sensorPixelModesUsed,
+ streamInfo.format, streamInfo.width,
+ streamInfo.height, metadataChosen, false /*flexibleConsumer*/,
+ &streamInfo.sensorPixelModesUsed) != OK) {
+ ALOGE("%s: Deferred surface sensor pixel modes not valid",
+ __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Deferred surface sensor pixel modes not valid");
+ }
mapStreamInfo(streamInfo, camera3::CAMERA_STREAM_ROTATION_0, physicalCameraId, groupId,
&streamConfiguration.streams[streamIdx++]);
isStreamInfoValid = true;
@@ -447,10 +588,8 @@
for (auto& bufferProducer : bufferProducers) {
sp<Surface> surface;
- const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
- logicalCameraId,
- physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo );
+ logicalCameraId, metadataChosen, sensorPixelModesUsed);
if (!res.isOk())
return res;
@@ -465,6 +604,7 @@
// additional internal camera streams.
std::vector<OutputStreamInfo> compositeStreams;
if (isDepthCompositeStream) {
+ // TODO: Take care of composite streams.
ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
deviceInfo, &compositeStreams);
} else {
@@ -505,7 +645,97 @@
}
}
return binder::Status::ok();
+}
+static bool inStreamConfigurationMap(int format, int width, int height,
+ const std::unordered_map<int, std::vector<camera3::StreamConfiguration>> &sm) {
+ auto scs = sm.find(format);
+ if (scs == sm.end()) {
+ return false;
+ }
+ for (auto &sc : scs->second) {
+ if (sc.width == width && sc.height == height && sc.isInput == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static std::unordered_set<int32_t> convertToSet(const std::vector<int32_t> &sensorPixelModesUsed) {
+ return std::unordered_set<int32_t>(sensorPixelModesUsed.begin(), sensorPixelModesUsed.end());
+}
+
+status_t SessionConfigurationUtils::checkAndOverrideSensorPixelModesUsed(
+ const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
+ const CameraMetadata &staticInfo, bool flexibleConsumer,
+ std::unordered_set<int32_t> *overriddenSensorPixelModesUsed) {
+ if (!isUltraHighResolutionSensor(staticInfo)) {
+ overriddenSensorPixelModesUsed->clear();
+ overriddenSensorPixelModesUsed->insert(ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
+ return OK;
+ }
+
+ StreamConfigurationPair streamConfigurationPair = getStreamConfigurationPair(staticInfo);
+ const std::unordered_set<int32_t> &sensorPixelModesUsedSet =
+ convertToSet(sensorPixelModesUsed);
+ bool isInDefaultStreamConfigurationMap =
+ inStreamConfigurationMap(format, width, height,
+ streamConfigurationPair.mDefaultStreamConfigurationMap);
+
+ bool isInMaximumResolutionStreamConfigurationMap =
+ inStreamConfigurationMap(format, width, height,
+ streamConfigurationPair.mMaximumResolutionStreamConfigurationMap);
+
+    // Case 1: The client has not set sensorPixelModesUsed. In this case, we check whether the
+    // size + format of the OutputConfiguration is found in exactly one of the two stream
+    // configuration maps (default vs maximum resolution).
+    // If yes, add that map's sensorPixelMode to overriddenSensorPixelModes.
+    // If it is in both (or neither map but roundable), add 'DEFAULT'. This maintains backwards
+ if (sensorPixelModesUsedSet.size() == 0) {
+ // Ambiguous case, default to only 'DEFAULT' mode.
+ if (isInDefaultStreamConfigurationMap && isInMaximumResolutionStreamConfigurationMap) {
+ overriddenSensorPixelModesUsed->insert(ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
+ return OK;
+ }
+ // We don't allow flexible consumer for max resolution mode.
+ if (isInMaximumResolutionStreamConfigurationMap) {
+ overriddenSensorPixelModesUsed->insert(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION);
+ return OK;
+ }
+ if (isInDefaultStreamConfigurationMap || (flexibleConsumer && width < ROUNDING_WIDTH_CAP)) {
+ overriddenSensorPixelModesUsed->insert(ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
+ return OK;
+ }
+ return BAD_VALUE;
+ }
+
+ // Case2: The app has set sensorPixelModesUsed, we need to verify that they
+ // are valid / err out.
+ if (sensorPixelModesUsedSet.find(ANDROID_SENSOR_PIXEL_MODE_DEFAULT) !=
+ sensorPixelModesUsedSet.end() && !isInDefaultStreamConfigurationMap) {
+ return BAD_VALUE;
+ }
+
+ if (sensorPixelModesUsedSet.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
+ sensorPixelModesUsedSet.end() && !isInMaximumResolutionStreamConfigurationMap) {
+ return BAD_VALUE;
+ }
+ *overriddenSensorPixelModesUsed = sensorPixelModesUsedSet;
+ return OK;
+}
+
+bool SessionConfigurationUtils::isUltraHighResolutionSensor(const CameraMetadata &deviceInfo) {
+ camera_metadata_ro_entry_t entryCap;
+ entryCap = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ // Go through the capabilities and check if it has
+ // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR
+ for (size_t i = 0; i < entryCap.count; ++i) {
+ uint8_t capability = entryCap.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR) {
+ return true;
+ }
+ }
+ return false;
}
bool SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
@@ -531,4 +761,5 @@
return true;
}
-}// namespace android
+} // namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 36e1dd7..863a0cd 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -22,24 +22,60 @@
#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/SubmitInfo.h>
#include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
#include <device3/Camera3StreamInterface.h>
#include <stdint.h>
+// Convenience methods for constructing binder::Status objects for error returns
+
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+ __VA_ARGS__))
+
namespace android {
+namespace camera3 {
typedef std::function<CameraMetadata (const String8 &)> metadataGetter;
+class StreamConfiguration {
+public:
+ int32_t format;
+ int32_t width;
+ int32_t height;
+ int32_t isInput;
+ static void getStreamConfigurations(
+ const CameraMetadata &static_info, bool maxRes,
+ std::unordered_map<int, std::vector<StreamConfiguration>> *scm);
+ static void getStreamConfigurations(
+ const CameraMetadata &static_info, int configuration,
+ std::unordered_map<int, std::vector<StreamConfiguration>> *scm);
+};
+
+// Holds the default StreamConfigurationMap and Maximum resolution
+// StreamConfigurationMap for a camera device.
+struct StreamConfigurationPair {
+ std::unordered_map<int, std::vector<camera3::StreamConfiguration>>
+ mDefaultStreamConfigurationMap;
+ std::unordered_map<int, std::vector<camera3::StreamConfiguration>>
+ mMaximumResolutionStreamConfigurationMap;
+};
+
class SessionConfigurationUtils {
public:
-
static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
// Find the closest dimensions for a given format in available stream configurations with
// a width <= ROUNDING_WIDTH_CAP
static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
- android_dataspace dataSpace, const CameraMetadata& info,
+ android_dataspace dataSpace, const CameraMetadata& info, bool maxResolution,
/*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
//check if format is not custom format
@@ -50,7 +86,8 @@
static binder::Status createSurfaceFromGbp(
camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
- const String8 &cameraId, const CameraMetadata &physicalCameraMetadata);
+ const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
+ const std::vector<int32_t> &sensorPixelModesUsed);
static void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
@@ -86,10 +123,23 @@
hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37);
+ static StreamConfigurationPair getStreamConfigurationPair(const CameraMetadata &metadata);
+
+ static status_t checkAndOverrideSensorPixelModesUsed(
+ const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
+ const CameraMetadata &staticInfo, bool flexibleConsumer,
+ std::unordered_set<int32_t> *overriddenSensorPixelModesUsed);
+
+ static bool isUltraHighResolutionSensor(const CameraMetadata &deviceInfo);
+
+ static int32_t getAppropriateModeTag(int32_t defaultTag, bool maxResolution = false);
+
static const int32_t MAX_SURFACES_PER_STREAM = 4;
static const int32_t ROUNDING_WIDTH_CAP = 1920;
+
};
+} // namespace camera3
} // android
#endif