Merge "Transcoder: Fixed codec leaks on cancel."
diff --git a/apex/Android.bp b/apex/Android.bp
index c1ef3d8..ef296d6 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -59,6 +59,16 @@
name: "com.android.media",
manifest: "manifest.json",
defaults: ["com.android.media-defaults"],
+ prebuilts: [
+ "media-linker-config",
+ ],
+}
+
+prebuilt_etc {
+ name: "media-linker-config",
+ src: "linker.config.txt",
+ filename: "linker.config.txt",
+ installable: false,
}
filegroup {
diff --git a/apex/TEST_MAPPING b/apex/TEST_MAPPING
index f036516..09c46d6 100644
--- a/apex/TEST_MAPPING
+++ b/apex/TEST_MAPPING
@@ -14,17 +14,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/apex/linker.config.txt b/apex/linker.config.txt
new file mode 100644
index 0000000..d1c815b
--- /dev/null
+++ b/apex/linker.config.txt
@@ -0,0 +1,7 @@
+# Extra linker configurations for media APEX
+# See https://android.googlesource.com/platform/system/linkerconfig/+/master/README.md#apex_etc_linker_config_txt
+
+[properties]
+
+# Set media APEX as force visible so media APEX namespace is accessible via android_get_exported_namespace
+visible = true
diff --git a/apex/mediaswcodec.rc b/apex/mediaswcodec.rc
index d17481b..0c9b8c8 100644
--- a/apex/mediaswcodec.rc
+++ b/apex/mediaswcodec.rc
@@ -2,6 +2,5 @@
class main
user mediacodec
group camera drmrpc mediadrm
- override
ioprio rt 4
writepid /dev/cpuset/foreground/tasks
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index aecb70a..0b0f584 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -29,6 +29,7 @@
#include <binder/IMemory.h>
#include <camera/CameraBase.h>
+#include <camera/CameraUtils.h>
// needed to instantiate
#include <camera/Camera.h>
@@ -124,9 +125,7 @@
{
Mutex::Autolock _l(gLock);
if (gCameraService.get() == 0) {
- char value[PROPERTY_VALUE_MAX];
- property_get("config.disable_cameraservice", value, "0");
- if (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0) {
+ if (CameraUtils::isCameraServiceDisabled()) {
return gCameraService;
}
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 135384a..7e4c91e 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -22,6 +22,7 @@
#include <binder/Parcel.h>
#include <camera/CameraMetadata.h>
+#include <camera_metadata_hidden.h>
namespace android {
@@ -872,5 +873,8 @@
return OK;
}
+metadata_vendor_id_t CameraMetadata::getVendorId() {
+ return get_camera_metadata_vendor_id(mBuffer);
+}
}; // namespace android
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
index 67fc116..f9b1b37 100644
--- a/camera/CameraUtils.cpp
+++ b/camera/CameraUtils.cpp
@@ -23,6 +23,7 @@
#include <system/window.h>
#include <system/graphics.h>
+#include <cutils/properties.h>
#include <utils/Log.h>
namespace android {
@@ -122,4 +123,10 @@
return OK;
}
+bool CameraUtils::isCameraServiceDisabled() {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("config.disable_cameraservice", value, "0");
+ return (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0);
+}
+
} /* namespace android */
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index d713d2d..24fa912 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -660,6 +660,16 @@
return sGlobalVendorTagDescriptorCache;
}
+bool VendorTagDescriptorCache::isVendorCachePresent(metadata_vendor_id_t vendorId) {
+ Mutex::Autolock al(sLock);
+ if ((sGlobalVendorTagDescriptorCache.get() != nullptr) &&
+ (sGlobalVendorTagDescriptorCache->getVendorIdsAndTagDescriptors().find(vendorId) !=
+ sGlobalVendorTagDescriptorCache->getVendorIdsAndTagDescriptors().end())) {
+ return true;
+ }
+ return false;
+}
+
extern "C" {
int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
diff --git a/camera/include/camera/CameraMetadata.h b/camera/include/camera/CameraMetadata.h
index 9d1b5c7..83abdb6 100644
--- a/camera/include/camera/CameraMetadata.h
+++ b/camera/include/camera/CameraMetadata.h
@@ -237,6 +237,11 @@
static status_t getTagFromName(const char *name,
const VendorTagDescriptor* vTags, uint32_t *tag);
+ /**
+ * Return the current vendor tag id associated with this metadata.
+ */
+ metadata_vendor_id_t getVendorId();
+
private:
camera_metadata_t *mBuffer;
mutable bool mLocked;
diff --git a/camera/include/camera/CameraUtils.h b/camera/include/camera/CameraUtils.h
index f596f80..a397ccd 100644
--- a/camera/include/camera/CameraUtils.h
+++ b/camera/include/camera/CameraUtils.h
@@ -47,6 +47,11 @@
*/
static bool isNativeHandleMetadata(const sp<IMemory>& imageData);
+ /**
+ * Check if camera service is disabled on this device
+ */
+ static bool isCameraServiceDisabled();
+
private:
CameraUtils();
};
diff --git a/camera/include/camera/VendorTagDescriptor.h b/camera/include/camera/VendorTagDescriptor.h
index b2fbf3a..b3440d5 100644
--- a/camera/include/camera/VendorTagDescriptor.h
+++ b/camera/include/camera/VendorTagDescriptor.h
@@ -249,6 +249,12 @@
*/
static void clearGlobalVendorTagCache();
+ /**
+ * Return true if given vendor id is present in the vendor tag caches, return
+ * false otherwise.
+ */
+ static bool isVendorCachePresent(metadata_vendor_id_t vendorId);
+
};
} /* namespace android */
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 419250c..73cabbf 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -24,6 +24,7 @@
#include <utils/Vector.h>
#include <cutils/properties.h>
#include <stdlib.h>
+#include <camera/CameraUtils.h>
#include <camera/VendorTagDescriptor.h>
using namespace android::acam;
@@ -70,12 +71,6 @@
mCameraService.clear();
}
-static bool isCameraServiceDisabled() {
- char value[PROPERTY_VALUE_MAX];
- property_get("config.disable_cameraservice", value, "0");
- return (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0);
-}
-
sp<hardware::ICameraService> CameraManagerGlobal::getCameraService() {
Mutex::Autolock _l(mLock);
return getCameraServiceLocked();
@@ -83,7 +78,7 @@
sp<hardware::ICameraService> CameraManagerGlobal::getCameraServiceLocked() {
if (mCameraService.get() == nullptr) {
- if (isCameraServiceDisabled()) {
+ if (CameraUtils::isCameraServiceDisabled()) {
return mCameraService;
}
diff --git a/drm/TEST_MAPPING b/drm/TEST_MAPPING
index 2595e3e..9f6a532 100644
--- a/drm/TEST_MAPPING
+++ b/drm/TEST_MAPPING
@@ -9,17 +9,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index 9a32cc5..74e3223 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -99,13 +99,13 @@
}
default:
{
- ALOGW("Unrecognized message type: %zd", msg->what());
+ ALOGW("Unrecognized message type: %u", msg->what());
}
}
}
int64_t DrmManager::getMetricsFlushPeriodUs() {
- return 1000 * 1000 * std::max(1ll, property_get_int64("drmmanager.metrics.period", 86400));
+ return 1000 * 1000 * std::max(1ll, (long long)property_get_int64("drmmanager.metrics.period", 86400));
}
void DrmManager::recordEngineMetrics(
diff --git a/drm/libmediadrm/DrmMetricsConsumer.cpp b/drm/libmediadrm/DrmMetricsConsumer.cpp
index b47b4ff..5f0b26e 100644
--- a/drm/libmediadrm/DrmMetricsConsumer.cpp
+++ b/drm/libmediadrm/DrmMetricsConsumer.cpp
@@ -37,8 +37,8 @@
template <> std::string GetAttributeName<KeyStatusType>(KeyStatusType type) {
static const char *type_names[] = {"USABLE", "EXPIRED",
"OUTPUT_NOT_ALLOWED", "STATUS_PENDING",
- "INTERNAL_ERROR"};
- if (((size_t)type) > arraysize(type_names)) {
+ "INTERNAL_ERROR", "USABLE_IN_FUTURE"};
+ if (((size_t)type) >= arraysize(type_names)) {
return "UNKNOWN_TYPE";
}
return type_names[(size_t)type];
@@ -48,7 +48,7 @@
static const char *type_names[] = {"PROVISION_REQUIRED", "KEY_NEEDED",
"KEY_EXPIRED", "VENDOR_DEFINED",
"SESSION_RECLAIMED"};
- if (((size_t)type) > arraysize(type_names)) {
+ if (((size_t)type) >= arraysize(type_names)) {
return "UNKNOWN_TYPE";
}
return type_names[(size_t)type];
diff --git a/drm/libmediadrm/include/mediadrm/DrmSessionManager.h b/drm/libmediadrm/include/mediadrm/DrmSessionManager.h
index 9e43504..c56bf01 100644
--- a/drm/libmediadrm/include/mediadrm/DrmSessionManager.h
+++ b/drm/libmediadrm/include/mediadrm/DrmSessionManager.h
@@ -62,7 +62,7 @@
void removeSession(const Vector<uint8_t>& sessionId);
bool reclaimSession(int callingPid);
- // sanity check APIs
+ // inspection APIs
size_t getSessionCount() const;
bool containsSession(const Vector<uint8_t>& sessionId) const;
diff --git a/include/drm/TEST_MAPPING b/include/drm/TEST_MAPPING
index 28e432e..512e844 100644
--- a/include/drm/TEST_MAPPING
+++ b/include/drm/TEST_MAPPING
@@ -8,17 +8,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index b006f38..50facfb 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -26,17 +26,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 533d330..8ee1efb 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -49,7 +49,12 @@
signal(SIGPIPE, SIG_IGN);
+#if 1
+ // FIXME See bug 165702394 and bug 168511485
+ const bool doLog = false;
+#else
bool doLog = (bool) property_get_bool("ro.test_harness", 0);
+#endif
pid_t childPid;
// FIXME The advantage of making the process containing media.log service the parent process of
diff --git a/media/codec2/core/include/C2Enum.h b/media/codec2/core/include/C2Enum.h
index b0fad8f..da1f43b 100644
--- a/media/codec2/core/include/C2Enum.h
+++ b/media/codec2/core/include/C2Enum.h
@@ -54,7 +54,7 @@
/// \note this will contain any initialization, which we will remove when converting to lower-case
#define _C2_GET_ENUM_NAME(x, y) #x
/// mapper to get value of enum
-#define _C2_GET_ENUM_VALUE(x, type) (_C2EnumConst<type>)x
+#define _C2_GET_ENUM_VALUE(x, type_) (_C2EnumConst<typename std::underlying_type<type_>::type>)type_::x
/// \endcond
@@ -106,7 +106,7 @@
template<> \
C2FieldDescriptor::NamedValuesType C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
return _C2EnumUtils::sanitizeEnumValues( \
- std::vector<C2Value::Primitive> { _C2_MAP(_C2_GET_ENUM_VALUE, type, __VA_ARGS__) }, \
+ std::vector<C2Value::Primitive> { _C2_MAP(_C2_GET_ENUM_VALUE, name, __VA_ARGS__) }, \
{ _C2_MAP(_C2_GET_ENUM_NAME, type, __VA_ARGS__) }, \
prefix); \
}
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index 51d417a..436269a 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -508,6 +508,14 @@
return _mIndex.setPort(output);
}
+ /// sets the size of this parameter.
+ inline void setSize(size_t size) {
+ if (size < sizeof(C2Param)) {
+ size = 0;
+ }
+ _mSize = c2_min(size, _mSize);
+ }
+
public:
/// invalidate this parameter. There is no recovery from this call; e.g. parameter
/// cannot be 'corrected' to be valid.
diff --git a/media/codec2/core/include/C2ParamDef.h b/media/codec2/core/include/C2ParamDef.h
index 0a33283..d578820 100644
--- a/media/codec2/core/include/C2ParamDef.h
+++ b/media/codec2/core/include/C2ParamDef.h
@@ -97,6 +97,9 @@
PARAM_TYPE = CoreIndex | TypeFlags
};
+ // the underlying param struct type
+ typedef S Struct;
+
protected:
enum : uint32_t {
FLEX_SIZE = 0,
@@ -270,6 +273,11 @@
} \
return 0; \
} \
+ inline void setFlexCount(size_t count) { \
+ if (count < flexCount()) { \
+ this->setSize(sizeof(_Type) + _Type::FLEX_SIZE * count); \
+ } \
+ } \
/// Mark flexible member variable and make structure flexible.
#define FLEX(cls, m) \
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 73b3857..1405b97 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1331,8 +1331,6 @@
mCallback->onError(err2, ACTION_CODE_FATAL);
return;
}
- // We're not starting after flush.
- (void)mSentConfigAfterResume.test_and_set();
err2 = mChannel->start(inputFormat, outputFormat, buffersBoundToCodec);
if (err2 != OK) {
mCallback->onError(err2, ACTION_CODE_FATAL);
@@ -1580,7 +1578,6 @@
return;
}
- mSentConfigAfterResume.clear();
{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
@@ -1797,7 +1794,7 @@
// handle configuration changes in work done
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
- bool changed = !mSentConfigAfterResume.test_and_set();
+ bool changed = false;
Config::Watcher<C2StreamInitDataInfo::output> initData =
config->watch<C2StreamInitDataInfo::output>();
if (!work->worklets.empty()
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index c49a16c..3c99bf6 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -91,7 +91,9 @@
newFormat->setInt32(KEY_STRIDE, stride);
ALOGD("[%s] updating stride = %d", mName, stride);
if (img->mNumPlanes > 1 && stride > 0) {
- int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+ int64_t offsetDelta =
+ (int64_t)img->mPlane[1].mOffset - (int64_t)img->mPlane[0].mOffset;
+ int32_t vstride = int32_t(offsetDelta / stride);
newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
ALOGD("[%s] updating vstride = %d", mName, vstride);
}
diff --git a/media/codec2/sfplugin/include/media/stagefright/CCodec.h b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
index ecb2506..dbbb5d5 100644
--- a/media/codec2/sfplugin/include/media/stagefright/CCodec.h
+++ b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
@@ -193,7 +193,6 @@
Mutexed<std::unique_ptr<CCodecConfig>> mConfig;
Mutexed<std::list<std::unique_ptr<C2Work>>> mWorkDoneQueue;
- std::atomic_flag mSentConfigAfterResume;
friend class CCodecCallbackImpl;
diff --git a/media/codec2/tests/C2Param_test.cpp b/media/codec2/tests/C2Param_test.cpp
index 564d4d2..c39605a 100644
--- a/media/codec2/tests/C2Param_test.cpp
+++ b/media/codec2/tests/C2Param_test.cpp
@@ -2328,6 +2328,17 @@
static_assert(std::is_same<decltype(blobValue->m.value), uint8_t[]>::value, "should be uint8_t[]");
EXPECT_EQ(0, memcmp(blobValue->m.value, "ABCD\0", 6));
EXPECT_EQ(6u, blobValue->flexCount());
+ blobValue->setFlexCount(7u); // increasing the count does not change it
+ EXPECT_EQ(6u, blobValue->flexCount());
+ blobValue->setFlexCount(2u); // decreasing the count changes it
+ EXPECT_EQ(2u, blobValue->flexCount());
+ blobValue->setFlexCount(0u); // can decrease to 0 and blob remains valid
+ EXPECT_EQ(0u, blobValue->flexCount());
+ EXPECT_TRUE(*blobValue);
+ blobValue->invalidate(); // flex params can be invalidated => results in 0 size
+ EXPECT_FALSE(*blobValue);
+ EXPECT_EQ(0u, blobValue->size());
+
std::vector<C2FieldDescriptor> fields = blobValue->FieldList();
EXPECT_EQ(1u, fields.size());
EXPECT_EQ(FD::BLOB, fields.cbegin()->type());
diff --git a/media/codec2/tests/C2UtilTest.cpp b/media/codec2/tests/C2UtilTest.cpp
index 59cd313..2d66df1 100644
--- a/media/codec2/tests/C2UtilTest.cpp
+++ b/media/codec2/tests/C2UtilTest.cpp
@@ -78,7 +78,7 @@
{ "value2", Enum3Value2 },
{ "value4", Enum3Value4 },
{ "invalid", Invalid } });
- Enum3 e3;
+ Enum3 e3(Invalid);
C2FieldDescriptor::namedValuesFor(e3);
// upper case
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index 038b99a..4984b8f 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,7 +1,5 @@
{
"presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "ExtractorUnitTest" },
// TODO(b/153661591) enable test once the bug is fixed
// This tests the extractor path
@@ -16,5 +14,14 @@
// }
// ]
// }
+ ],
+
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "ExtractorUnitTest" }
]
+
+
}
diff --git a/media/janitors/OWNERS-codecs b/media/janitors/OWNERS-codecs
new file mode 100644
index 0000000..e201399
--- /dev/null
+++ b/media/janitors/OWNERS-codecs
@@ -0,0 +1,5 @@
+# gerrit owner/approvers for the actual software codec libraries
+# differentiated from plugins connecting those codecs to either omx or codec2 infrastructure
+essick@google.com
+lajos@google.com
+marcone@google.com
diff --git a/media/janitors/README b/media/janitors/README
new file mode 100644
index 0000000..9db8e0e
--- /dev/null
+++ b/media/janitors/README
@@ -0,0 +1,4 @@
+A collection of OWNERS files that we reference from other projects,
+such as the software codecs in directories like external/libavc.
+This is to simplify our owner/approver management across the multiple
+projects related to media.
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 140052f..e81ab06 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -32,5 +32,6 @@
cc_library_headers {
name: "libaaudio_headers",
export_include_dirs: ["include"],
+ export_header_lib_headers: ["aaudio-aidl-cpp"],
+ header_libs: ["aaudio-aidl-cpp"],
}
-
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 717f31a..328ceda 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -85,6 +85,7 @@
"libcutils",
"libutils",
"libbinder",
+ "aaudio-aidl-cpp",
],
cflags: [
@@ -114,11 +115,10 @@
"client/AudioStreamInternalPlay.cpp",
"client/IsochronousClockModel.cpp",
"binding/AudioEndpointParcelable.cpp",
+ "binding/AAudioBinderAdapter.cpp",
"binding/AAudioBinderClient.cpp",
"binding/AAudioStreamRequest.cpp",
"binding/AAudioStreamConfiguration.cpp",
- "binding/IAAudioClient.cpp",
- "binding/IAAudioService.cpp",
"binding/RingBufferParcelable.cpp",
"binding/SharedMemoryParcelable.cpp",
"binding/SharedRegionParcelable.cpp",
@@ -138,3 +138,33 @@
misc_undefined: ["bounds"],
},
}
+
+aidl_interface {
+ name: "aaudio-aidl",
+ unstable: true,
+ local_include_dir: "binding/aidl",
+ srcs: [
+ "binding/aidl/aaudio/Endpoint.aidl",
+ "binding/aidl/aaudio/RingBuffer.aidl",
+ "binding/aidl/aaudio/SharedRegion.aidl",
+ "binding/aidl/aaudio/StreamParameters.aidl",
+ "binding/aidl/aaudio/StreamRequest.aidl",
+ "binding/aidl/aaudio/IAAudioClient.aidl",
+ "binding/aidl/aaudio/IAAudioService.aidl",
+ ],
+ imports: [
+ "audio_common-aidl",
+ "shared-file-region-aidl",
+ ],
+ backend:
+ {
+ cpp: {
+ enabled: true,
+ },
+ java: {
+ // TODO: need to have audio_common-aidl available in Java to enable
+ // this.
+ enabled: false,
+ },
+ },
+}
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
new file mode 100644
index 0000000..2b2fe6d
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binding/AAudioBinderAdapter.h>
+#include <utility/AAudioUtilities.h>
+
+namespace aaudio {
+
+using android::binder::Status;
+
+AAudioBinderAdapter::AAudioBinderAdapter(IAAudioService* delegate)
+ : mDelegate(delegate) {}
+
+void AAudioBinderAdapter::registerClient(const android::sp<IAAudioClient>& client) {
+ mDelegate->registerClient(client);
+}
+
+aaudio_handle_t AAudioBinderAdapter::openStream(const AAudioStreamRequest& request,
+ AAudioStreamConfiguration& config) {
+ aaudio_handle_t result;
+ StreamParameters params;
+ Status status = mDelegate->openStream(request.parcelable(),
+ &params,
+ &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ config = params;
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::closeStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->closeStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable& endpointOut) {
+ aaudio_result_t result;
+ Endpoint endpoint;
+ Status status = mDelegate->getStreamDescription(streamHandle,
+ &endpoint,
+ &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ endpointOut = std::move(endpoint);
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::startStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->startStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::pauseStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->pauseStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::stopStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->stopStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::flushStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->flushStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) {
+ aaudio_result_t result;
+ Status status = mDelegate->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) {
+ aaudio_result_t result;
+ Status status = mDelegate->unregisterAudioThread(streamHandle, clientThreadId, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.h b/media/libaaudio/src/binding/AAudioBinderAdapter.h
new file mode 100644
index 0000000..5e9ab57
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aaudio/IAAudioService.h>
+#include <binding/AAudioServiceInterface.h>
+
+namespace aaudio {
+
+/**
+ * An adapter that takes in an underlying IAAudioService and exposes an
+ * AAudioServiceInterface.
+ *
+ * This class is abstract: the client is expected to inherit from this class and implement those
+ * methods from AAudioServiceInterface that don't have counterparts in IAAudioService.
+ */
+class AAudioBinderAdapter : public AAudioServiceInterface {
+public:
+ explicit AAudioBinderAdapter(IAAudioService* delegate);
+
+ void registerClient(const android::sp<IAAudioClient>& client) override;
+
+ aaudio_handle_t openStream(const AAudioStreamRequest& request,
+ AAudioStreamConfiguration& configuration) override;
+
+ aaudio_result_t closeStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable& endpoint) override;
+
+ aaudio_result_t startStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t flushStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) override;
+
+ aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) override;
+
+private:
+ IAAudioService* const mDelegate;
+};
+
+} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 7b0d31f..fa5a2da 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -19,35 +19,30 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
-#include <binder/IInterface.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/Mutex.h>
#include <utils/RefBase.h>
#include <utils/Singleton.h>
-#include <media/AudioSystem.h>
-
#include <aaudio/AAudio.h>
#include "AudioEndpointParcelable.h"
-#include "binding/AAudioBinderClient.h"
-//#include "binding/AAudioStreamRequest.h"
-//#include "binding/AAudioStreamConfiguration.h"
-//#include "binding/IAAudioService.h"
-//#include "binding/AAudioServiceMessage.h"
-//#include "AAudioServiceInterface.h"
+#include "binding/AAudioBinderClient.h"
+
+#define AAUDIO_SERVICE_NAME "media.aaudio"
using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::IInterface;
-using android::IAAudioService;
using android::Mutex;
using android::ProcessState;
using android::sp;
+using android::status_t;
using android::wp;
+using android::binder::Status;
using namespace aaudio;
@@ -67,20 +62,18 @@
AAudioBinderClient::~AAudioBinderClient() {
ALOGV("%s - destroying %p", __func__, this);
Mutex::Autolock _l(mServiceLock);
- if (mAAudioService != 0) {
- IInterface::asBinder(mAAudioService)->unlinkToDeath(mAAudioClient);
- }
}
// TODO Share code with other service clients.
// Helper function to get access to the "AAudioService" service.
// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
-const sp<IAAudioService> AAudioBinderClient::getAAudioService() {
+std::shared_ptr<AAudioServiceInterface> AAudioBinderClient::getAAudioService() {
+ std::shared_ptr<AAudioServiceInterface> result;
sp<IAAudioService> aaudioService;
bool needToRegister = false;
{
Mutex::Autolock _l(mServiceLock);
- if (mAAudioService.get() == nullptr) {
+ if (mAdapter == nullptr) {
sp<IBinder> binder;
sp<IServiceManager> sm = defaultServiceManager();
// Try several times to get the service.
@@ -99,7 +92,8 @@
if (status != NO_ERROR) {
ALOGE("%s() - linkToDeath() returned %d", __func__, status);
}
- mAAudioService = interface_cast<IAAudioService>(binder);
+ aaudioService = interface_cast<IAAudioService>(binder);
+ mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
needToRegister = true;
// Make sure callbacks can be received by mAAudioClient
ProcessState::self()->startThreadPool();
@@ -107,18 +101,18 @@
ALOGE("AAudioBinderClient could not connect to %s", AAUDIO_SERVICE_NAME);
}
}
- aaudioService = mAAudioService;
+ result = mAdapter;
}
// Do this outside the mutex lock.
if (needToRegister && aaudioService.get() != nullptr) { // new client?
aaudioService->registerClient(mAAudioClient);
}
- return aaudioService;
+ return result;
}
void AAudioBinderClient::dropAAudioService() {
Mutex::Autolock _l(mServiceLock);
- mAAudioService.clear(); // force a reconnect
+ mAdapter.reset();
}
/**
@@ -127,13 +121,13 @@
* @return handle to the stream or a negative error
*/
aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
- AAudioStreamConfiguration &configurationOutput) {
+ AAudioStreamConfiguration &configuration) {
aaudio_handle_t stream;
for (int i = 0; i < 2; i++) {
- const sp<IAAudioService> &service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- stream = service->openStream(request, configurationOutput);
+ stream = service->openStream(request, configuration);
if (stream == AAUDIO_ERROR_NO_SERVICE) {
ALOGE("openStream lost connection to AAudioService.");
@@ -146,8 +140,9 @@
}
aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->closeStream(streamHandle);
}
@@ -155,33 +150,38 @@
* used to communicate with the underlying HAL or Service.
*/
aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) {
- const sp<IAAudioService> service = getAAudioService();
+ AudioEndpointParcelable& endpointOut) {
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->getStreamDescription(streamHandle, parcelable);
+
+ return service->getStreamDescription(streamHandle, endpointOut);
}
aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->startStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->pauseStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->stopStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->flushStream(streamHandle);
}
@@ -191,17 +191,16 @@
aaudio_result_t AAudioBinderClient::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->registerAudioThread(streamHandle,
- clientThreadId,
- periodNanoseconds);
+
+ return service->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds);
}
aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->unregisterAudioThread(streamHandle,
- clientThreadId);
+
+ return service->unregisterAudioThread(streamHandle, clientThreadId);
}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index e8c91fc..6a7b639 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -21,12 +21,15 @@
#include <utils/Singleton.h>
#include <aaudio/AAudio.h>
-#include "AAudioServiceDefinitions.h"
+#include <binder/IInterface.h>
+
+#include "aaudio/BnAAudioClient.h"
+#include "aaudio/IAAudioService.h"
#include "AAudioServiceInterface.h"
+#include "binding/AAudioBinderAdapter.h"
#include "binding/AAudioStreamRequest.h"
-#include "binding/AAudioStreamConfiguration.h"
#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioService.h"
+#include "core/AAudioStreamParameters.h"
/**
* Implements the AAudioServiceInterface by talking to the service through Binder.
@@ -44,11 +47,7 @@
virtual ~AAudioBinderClient();
- const android::sp<android::IAAudioService> getAAudioService();
-
- void dropAAudioService();
-
- void registerClient(const android::sp<android::IAAudioClient>& client __unused) override {}
+ void registerClient(const android::sp<IAAudioClient>& client __unused) override {}
/**
* @param request info needed to create the stream
@@ -64,7 +63,7 @@
* used to communicate with the underlying HAL or Service.
*/
aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) override;
+ AudioEndpointParcelable &endpointOut) override;
/**
* Start the flow of data.
@@ -115,8 +114,7 @@
ALOGW("onStreamChange called!");
}
- class AAudioClient : public android::IBinder::DeathRecipient , public android::BnAAudioClient
- {
+ class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
public:
AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
: mBinderClient(aaudioBinderClient) {
@@ -132,21 +130,66 @@
}
// implement BnAAudioClient
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+ android::binder::Status onStreamChange(int32_t handle, int32_t opcode, int32_t value) {
+ static_assert(std::is_same_v<aaudio_handle_t, int32_t>);
android::sp<AAudioBinderClient> client = mBinderClient.promote();
if (client.get() != nullptr) {
client->onStreamChange(handle, opcode, value);
}
+ return android::binder::Status::ok();
}
private:
android::wp<AAudioBinderClient> mBinderClient;
};
-private:
+ // This adapter is used to convert the binder interface (delegate) to the AAudioServiceInterface
+ // conventions (translating between data types and respective parcelables, translating error
+ // codes and calling conventions).
+ // The adapter also owns the underlying service object and is responsible for unlinking its death
+ // listener when destroyed.
+ class Adapter : public AAudioBinderAdapter {
+ public:
+ Adapter(const android::sp<IAAudioService>& delegate,
+ const android::sp<AAudioClient>& aaudioClient)
+ : AAudioBinderAdapter(delegate.get()),
+ mDelegate(delegate),
+ mAAudioClient(aaudioClient) {}
- android::Mutex mServiceLock;
- android::sp<android::IAAudioService> mAAudioService;
- android::sp<AAudioClient> mAAudioClient;
+ virtual ~Adapter() {
+ if (mDelegate != nullptr) {
+ android::IInterface::asBinder(mDelegate)->unlinkToDeath(mAAudioClient);
+ }
+ }
+
+ // This should never be called (call is rejected at the AAudioBinderClient level).
+ aaudio_result_t startClient(aaudio_handle_t streamHandle __unused,
+ const android::AudioClient& client __unused,
+ const audio_attributes_t* attr __unused,
+ audio_port_handle_t* clientHandle __unused) override {
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ // This should never be called (call is rejected at the AAudioBinderClient level).
+ aaudio_result_t stopClient(aaudio_handle_t streamHandle __unused,
+ audio_port_handle_t clientHandle __unused) override {
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ private:
+ android::sp<IAAudioService> mDelegate;
+ android::sp<AAudioClient> mAAudioClient;
+ };
+
+private:
+ android::Mutex mServiceLock;
+ std::shared_ptr<AAudioServiceInterface> mAdapter;
+ android::sp<AAudioClient> mAAudioClient;
+
+ std::shared_ptr<AAudioServiceInterface> getAAudioService();
+
+ void dropAAudioService();
};
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 9c28cc7..5d11512 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -20,11 +20,11 @@
#include <utils/StrongPointer.h>
#include <media/AudioClient.h>
+#include "aaudio/IAAudioClient.h"
#include "binding/AAudioServiceDefinitions.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioClient.h"
/**
* This has the same methods as IAAudioService but without the Binder features.
@@ -40,7 +40,7 @@
AAudioServiceInterface() {};
virtual ~AAudioServiceInterface() = default;
- virtual void registerClient(const android::sp<android::IAAudioClient>& client) = 0;
+ virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
/**
* @param request info needed to create the stream
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index b785f88..2d501ef 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,101 +23,66 @@
#include <sys/mman.h>
#include <aaudio/AAudio.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
-
#include "binding/AAudioStreamConfiguration.h"
-using android::NO_ERROR;
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
using namespace aaudio;
-AAudioStreamConfiguration::AAudioStreamConfiguration() {}
-AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
+using android::media::audio::common::AudioFormat;
-status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
- status_t status;
-
- status = parcel->writeInt32(getDeviceId());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSampleRate());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSamplesPerFrame());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getSharingMode());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getFormat());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32((int32_t) getDirection());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getBufferCapacity());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getUsage());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getContentType());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getInputPreset());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getAllowedCapturePolicy());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSessionId());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(isPrivacySensitive() ? 1 : 0);
- if (status != NO_ERROR) goto error;
- return NO_ERROR;
-error:
- ALOGE("%s(): write failed = %d", __func__, status);
- return status;
+AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
+ setSamplesPerFrame(parcelable.samplesPerFrame);
+ setSampleRate(parcelable.sampleRate);
+ setDeviceId(parcelable.deviceId);
+ static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
+ setSharingMode(parcelable.sharingMode);
+ static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
+ setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+ static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
+ setDirection(parcelable.direction);
+ static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
+ setUsage(parcelable.usage);
+ static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
+ setContentType(parcelable.contentType);
+ static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
+ setInputPreset(parcelable.inputPreset);
+ setBufferCapacity(parcelable.bufferCapacity);
+ static_assert(
+ sizeof(aaudio_allowed_capture_policy_t) == sizeof(parcelable.allowedCapturePolicy));
+ setAllowedCapturePolicy(parcelable.allowedCapturePolicy);
+ static_assert(sizeof(aaudio_session_id_t) == sizeof(parcelable.sessionId));
+ setSessionId(parcelable.sessionId);
+ setPrivacySensitive(parcelable.isPrivacySensitive);
}
-status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
- int32_t value;
- status_t status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setDeviceId(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSampleRate(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSamplesPerFrame(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSharingMode((aaudio_sharing_mode_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setFormat((audio_format_t) value);
+AAudioStreamConfiguration&
+AAudioStreamConfiguration::operator=(const StreamParameters& parcelable) {
+ this->~AAudioStreamConfiguration();
+ new (this) AAudioStreamConfiguration(parcelable);
+ return *this;
+}
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setDirection((aaudio_direction_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setBufferCapacity(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setUsage((aaudio_usage_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setContentType((aaudio_content_type_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setInputPreset((aaudio_input_preset_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setAllowedCapturePolicy((aaudio_allowed_capture_policy_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSessionId(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setPrivacySensitive(value == 1);
- return NO_ERROR;
-error:
- ALOGE("%s(): read failed = %d", __func__, status);
- return status;
+StreamParameters AAudioStreamConfiguration::parcelable() const {
+ StreamParameters result;
+ result.samplesPerFrame = getSamplesPerFrame();
+ result.sampleRate = getSampleRate();
+ result.deviceId = getDeviceId();
+ static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
+ result.sharingMode = getSharingMode();
+ static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
+ result.audioFormat = static_cast<AudioFormat>(getFormat());
+ static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
+ result.direction = getDirection();
+ static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
+ result.usage = getUsage();
+ static_assert(sizeof(aaudio_content_type_t) == sizeof(result.contentType));
+ result.contentType = getContentType();
+ static_assert(sizeof(aaudio_input_preset_t) == sizeof(result.inputPreset));
+ result.inputPreset = getInputPreset();
+ result.bufferCapacity = getBufferCapacity();
+ static_assert(sizeof(aaudio_allowed_capture_policy_t) == sizeof(result.allowedCapturePolicy));
+ result.allowedCapturePolicy = getAllowedCapturePolicy();
+ static_assert(sizeof(aaudio_session_id_t) == sizeof(result.sessionId));
+ result.sessionId = getSessionId();
+ result.isPrivacySensitive = isPrivacySensitive();
+ return result;
}
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index b324896..f428eb0 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -20,24 +20,24 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
+#include <aaudio/StreamParameters.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include "core/AAudioStreamParameters.h"
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
namespace aaudio {
-class AAudioStreamConfiguration : public AAudioStreamParameters, public Parcelable {
+// This is a holder for AAudioStreamParameters, which allows conversion to/from its parcelable
+// representation, StreamParameters.
+class AAudioStreamConfiguration : public AAudioStreamParameters {
public:
- AAudioStreamConfiguration();
- virtual ~AAudioStreamConfiguration();
+ AAudioStreamConfiguration() = default;
- virtual status_t writeToParcel(Parcel* parcel) const override;
+ explicit AAudioStreamConfiguration(const StreamParameters& parcelable);
- virtual status_t readFromParcel(const Parcel* parcel) override;
+ AAudioStreamConfiguration& operator=(const StreamParameters& parcelable);
+
+ StreamParameters parcelable() const;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index c30c5b9..536395a 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -21,67 +21,32 @@
#include <stdint.h>
#include <sys/mman.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include <aaudio/AAudio.h>
#include "binding/AAudioStreamConfiguration.h"
#include "binding/AAudioStreamRequest.h"
-using android::NO_ERROR;
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
using namespace aaudio;
-AAudioStreamRequest::AAudioStreamRequest()
- : mConfiguration()
- {}
-
-AAudioStreamRequest::~AAudioStreamRequest() {}
-
-status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
- status_t status = parcel->writeInt32((int32_t) mUserId);
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeBool(mSharingModeMatchRequired);
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeBool(mInService);
- if (status != NO_ERROR) goto error;
-
- status = mConfiguration.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("writeToParcel(): write failed = %d", status);
- return status;
+AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
+ mConfiguration(std::move(parcelable.params)),
+ mUserId(parcelable.userId),
+ mProcessId(parcelable.processId),
+ mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
+ mInService(parcelable.inService) {
+ static_assert(sizeof(mUserId) == sizeof(parcelable.userId));
+ static_assert(sizeof(mProcessId) == sizeof(parcelable.processId));
}
-status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
- int32_t temp;
- status_t status = parcel->readInt32(&temp);
- if (status != NO_ERROR) goto error;
- mUserId = (uid_t) temp;
-
- status = parcel->readBool(&mSharingModeMatchRequired);
- if (status != NO_ERROR) goto error;
-
- status = parcel->readBool(&mInService);
- if (status != NO_ERROR) goto error;
-
- status = mConfiguration.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("readFromParcel(): read failed = %d", status);
- return status;
+StreamRequest AAudioStreamRequest::parcelable() const {
+ StreamRequest result;
+ result.params = std::move(mConfiguration).parcelable();
+ result.userId = mUserId;
+ result.processId = mProcessId;
+ result.sharingModeMatchRequired = mSharingModeMatchRequired;
+ result.inService = mInService;
+ return result;
}
aaudio_result_t AAudioStreamRequest::validate() const {
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 492f69d..31d3ea1 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -20,21 +20,18 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <aaudio/StreamRequest.h>
#include "binding/AAudioStreamConfiguration.h"
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
namespace aaudio {
-class AAudioStreamRequest : public Parcelable {
+class AAudioStreamRequest {
public:
- AAudioStreamRequest();
- virtual ~AAudioStreamRequest();
+ AAudioStreamRequest() = default;
+
+ // Construct based on a parcelable representation.
+ explicit AAudioStreamRequest(const StreamRequest& parcelable);
uid_t getUserId() const {
return mUserId;
@@ -76,15 +73,14 @@
mInService = inService;
}
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t validate() const;
void dump() const;
-protected:
+ // Extract a parcelable representation of this object.
+ StreamRequest parcelable() const;
+
+private:
AAudioStreamConfiguration mConfiguration;
uid_t mUserId = (uid_t) -1;
pid_t mProcessId = (pid_t) -1;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index 61d7d27..aa4ac27 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,22 +29,43 @@
#include "binding/AudioEndpointParcelable.h"
using android::base::unique_fd;
+using android::media::SharedFileRegion;
using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
using namespace aaudio;
-/**
- * Container for information about the message queues plus
- * general stream information needed by AAudio clients.
- * It contains no addresses, just sizes, offsets and file descriptors for
- * shared memory that can be passed through Binder.
- */
-AudioEndpointParcelable::AudioEndpointParcelable() {}
+AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
+ : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
+ mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
+ mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
+ mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+ mNumSharedMemories(parcelable.sharedMemories.size()) {
+ for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
+ // Re-construct.
+ mSharedMemories[i].~SharedMemoryParcelable();
+ new(&mSharedMemories[i]) SharedMemoryParcelable(std::move(parcelable.sharedMemories[i]));
+ }
+}
-AudioEndpointParcelable::~AudioEndpointParcelable() {}
+AudioEndpointParcelable& AudioEndpointParcelable::operator=(Endpoint&& parcelable) {
+ this->~AudioEndpointParcelable();
+ new(this) AudioEndpointParcelable(std::move(parcelable));
+ return *this;
+}
+
+Endpoint AudioEndpointParcelable::parcelable()&& {
+ Endpoint result;
+ result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
+ result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
+ result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
+ result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+ result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
+ for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
+ result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
+ }
+ return result;
+}
/**
* Add the file descriptor to the table.
@@ -60,60 +81,6 @@
return index;
}
-/**
- * The read and write must be symmetric.
- */
-status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mNumSharedMemories);
- if (status != NO_ERROR) goto error;
-
- for (int i = 0; i < mNumSharedMemories; i++) {
- status = mSharedMemories[i].writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- status = mUpMessageQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownMessageQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mUpDataQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownDataQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mNumSharedMemories);
- if (status != NO_ERROR) goto error;
-
- for (int i = 0; i < mNumSharedMemories; i++) {
- mSharedMemories[i].readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- status = mUpMessageQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownMessageQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mUpDataQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownDataQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
&descriptor->upMessageQueueDescriptor);
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index e4f8b9e..5237a1a 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -20,16 +20,13 @@
#include <stdint.h>
//#include <sys/mman.h>
+#include <aaudio/Endpoint.h>
#include <android-base/unique_fd.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include "binding/AAudioServiceDefinitions.h"
#include "binding/RingBufferParcelable.h"
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
namespace aaudio {
@@ -39,10 +36,15 @@
* It contains no addresses, just sizes, offsets and file descriptors for
* shared memory that can be passed through Binder.
*/
-class AudioEndpointParcelable : public Parcelable {
+class AudioEndpointParcelable {
public:
- AudioEndpointParcelable();
- virtual ~AudioEndpointParcelable();
+ AudioEndpointParcelable() = default;
+
+ // Ctor/assignment from a parcelable representation.
+ // Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
+ // provided to avoid the need to dupe.
+ AudioEndpointParcelable(Endpoint&& parcelable);
+ AudioEndpointParcelable& operator=(Endpoint&& parcelable);
/**
* Add the file descriptor to the table.
@@ -50,16 +52,17 @@
*/
int32_t addFileDescriptor(const android::base::unique_fd& fd, int32_t sizeInBytes);
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(EndpointDescriptor *descriptor);
aaudio_result_t close();
void dump();
+ // Extract a parcelable representation of this object.
+ // Since our shared memory objects own a unique FD, move semantics are provided to avoid the
+ // need to dupe.
+ Endpoint parcelable() &&;
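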
+
public: // TODO add getters
// Set capacityInFrames to zero if Queue is unused.
RingBufferParcelable mUpMessageQueueParcelable; // server to client
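
The parcelable() && declared here is a ref-qualified member function: it can only be invoked on an rvalue (typically via std::move), which is what lets the conversion hand off the owned file descriptors rather than duplicate them. A self-contained illustration of the idiom, using toy types rather than the AAudio classes:

#include <iostream>
#include <string>
#include <utility>

// Toy stand-ins for illustration only; not the AAudio types.
struct Wire {
    std::string payload;
};

class Holder {
public:
    explicit Holder(std::string payload) : mPayload(std::move(payload)) {}

    // Rvalue-qualified: callable only on an expiring object, so the
    // payload can be moved out instead of copied.
    Wire toWire() && {
        return Wire{std::move(mPayload)};
    }

private:
    std::string mPayload;
};

int main() {
    Holder holder{"shared-memory stand-in"};
    // Wire w = holder.toWire();          // would not compile: holder is an lvalue
    Wire w = std::move(holder).toWire();  // OK: explicitly gives up ownership
    std::cout << w.payload << "\n";
    return 0;
}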
diff --git a/media/libaaudio/src/binding/IAAudioClient.cpp b/media/libaaudio/src/binding/IAAudioClient.cpp
deleted file mode 100644
index c69c4e8..0000000
--- a/media/libaaudio/src/binding/IAAudioClient.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioBinderClient.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/IAAudioClient.h"
-#include "utility/AAudioUtilities.h"
-
-namespace android {
-
-using aaudio::aaudio_handle_t;
-
-/**
- * This is used by the AAudio Service to talk to an AAudio Client.
- *
- * The order of parameters in the Parcels must match with code in AAudioClient.cpp.
- */
-class BpAAudioClient : public BpInterface<IAAudioClient>
-{
-public:
- explicit BpAAudioClient(const sp<IBinder>& impl)
- : BpInterface<IAAudioClient>(impl)
- {
- }
-
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) override {
- Parcel data, reply;
- data.writeInterfaceToken(IAAudioClient::getInterfaceDescriptor());
- data.writeInt32(handle);
- data.writeInt32(opcode);
- data.writeInt32(value);
- remote()->transact(ON_STREAM_CHANGE, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
-};
-
-// Implement an interface to the service.
-IMPLEMENT_META_INTERFACE(AAudioClient, "IAAudioClient");
-
-// The order of parameters in the Parcels must match with code in BpAAudioClient
-
-status_t BnAAudioClient::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- aaudio_handle_t streamHandle;
- int32_t opcode = 0;
- int32_t value = 0;
- ALOGV("BnAAudioClient::onTransact(%u) %u", code, flags);
-
- switch(code) {
- case ON_STREAM_CHANGE: {
- CHECK_INTERFACE(IAAudioClient, data, reply);
- data.readInt32(&streamHandle);
- data.readInt32(&opcode);
- data.readInt32(&value);
- onStreamChange(streamHandle, opcode, value);
- ALOGD("BnAAudioClient onStreamChange(%x, %d, %d)", streamHandle, opcode, value);
- return NO_ERROR;
- } break;
-
- default:
- // ALOGW("BnAAudioClient::onTransact not handled %u", code);
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioClient.h b/media/libaaudio/src/binding/IAAudioClient.h
deleted file mode 100644
index f21fd93..0000000
--- a/media/libaaudio/src/binding/IAAudioClient.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AAUDIO_IAAUDIO_CLIENT_H
-#define ANDROID_AAUDIO_IAAUDIO_CLIENT_H
-
-#include <stdint.h>
-#include <binder/IInterface.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioCommon.h"
-
-namespace android {
-
-
-// Interface (our AIDL) - client methods called by service
-class IAAudioClient : public IInterface {
-public:
-
- DECLARE_META_INTERFACE(AAudioClient);
-
- virtual void onStreamChange(aaudio::aaudio_handle_t handle, int32_t opcode, int32_t value) = 0;
-
-};
-
-class BnAAudioClient : public BnInterface<IAAudioClient> {
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags = 0);
-};
-
-} /* namespace android */
-
-#endif //ANDROID_AAUDIO_IAAUDIO_SERVICE_H
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
deleted file mode 100644
index e017b3a..0000000
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <aaudio/AAudio.h>
-#include <binder/IPCThreadState.h>
-
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/AAudioStreamRequest.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/AAudioStreamConfiguration.h"
-#include "binding/IAAudioService.h"
-#include "utility/AAudioUtilities.h"
-
-namespace android {
-
-using aaudio::aaudio_handle_t;
-
-/**
- * This is used by the AAudio Client to talk to the AAudio Service.
- *
- * The order of parameters in the Parcels must match with code in AAudioService.cpp.
- */
-class BpAAudioService : public BpInterface<IAAudioService>
-{
-public:
- explicit BpAAudioService(const sp<IBinder>& impl)
- : BpInterface<IAAudioService>(impl)
- {
- }
-
- void registerClient(const sp<IAAudioClient>& client) override
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(client));
- remote()->transact(REGISTER_CLIENT, data, &reply);
- }
-
- aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- // request.dump();
- request.writeToParcel(&data);
- status_t err = remote()->transact(OPEN_STREAM, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client openStream transact failed %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_handle_t stream;
- err = reply.readInt32(&stream);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- } else if (stream < 0) {
- return stream;
- }
- err = configurationOutput.readFromParcel(&reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
- closeStream(stream);
- return AAudioConvert_androidToAAudioResult(err);
- }
- return stream;
- }
-
- virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client closeStream transact failed %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) returns %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t result;
- err = reply.readInt32(&result);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) readInt %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- } else if (result != AAUDIO_OK) {
- ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
- return result;
- }
- err = parcelable.readFromParcel(&reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- return result;
- }
-
- // TODO should we wait for a reply?
- virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(START_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(STOP_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t periodNanoseconds)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- data.writeInt64(periodNanoseconds);
- status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
-};
-
-// Implement an interface to the service.
-// This is here so that you don't have to link with libaaudio static library.
-IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
-
-// The order of parameters in the Parcels must match with code in BpAAudioService
-
-status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- aaudio_handle_t streamHandle = 0;
- aaudio::AAudioStreamRequest request;
- aaudio::AAudioStreamConfiguration configuration;
- pid_t tid = 0;
- int64_t nanoseconds = 0;
- aaudio_result_t result = AAUDIO_OK;
- status_t status = NO_ERROR;
- ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
-
- switch(code) {
- case REGISTER_CLIENT: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- sp<IAAudioClient> client = interface_cast<IAAudioClient>(
- data.readStrongBinder());
- // readStrongBinder() can return null
- if (client.get() == nullptr) {
- ALOGE("BnAAudioService::%s(REGISTER_CLIENT) client is NULL!", __func__);
- android_errorWriteLog(0x534e4554, "116230453");
- return DEAD_OBJECT;
- } else {
- registerClient(client);
- return NO_ERROR;
- }
- } break;
-
- case OPEN_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- request.readFromParcel(&data);
- result = request.validate();
- if (result != AAUDIO_OK) {
- streamHandle = result;
- } else {
- //ALOGD("BnAAudioService::client openStream request dump --------------------");
- //request.dump();
- // Override the uid and pid from the client in case they are incorrect.
- request.setUserId(IPCThreadState::self()->getCallingUid());
- request.setProcessId(IPCThreadState::self()->getCallingPid());
- streamHandle = openStream(request, configuration);
- //ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X",
- // streamHandle);
- }
- reply->writeInt32(streamHandle);
- configuration.writeToParcel(reply);
- return NO_ERROR;
- } break;
-
- case CLOSE_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(CLOSE_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = closeStream(streamHandle);
- //ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X, result = %d",
- // streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case GET_STREAM_DESCRIPTION: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(GET_STREAM_DESCRIPTION) streamHandle failed!", __func__);
- return status;
- }
- aaudio::AudioEndpointParcelable parcelable;
- result = getStreamDescription(streamHandle, parcelable);
- if (result != AAUDIO_OK) {
- return AAudioConvert_aaudioToAndroidStatus(result);
- }
- status = reply->writeInt32(result);
- if (status != NO_ERROR) {
- return status;
- }
- return parcelable.writeToParcel(reply);
- } break;
-
- case START_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(START_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = startStream(streamHandle);
- ALOGV("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case PAUSE_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(PAUSE_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = pauseStream(streamHandle);
- ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case STOP_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(STOP_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = stopStream(streamHandle);
- ALOGV("BnAAudioService::onTransact STOP_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case FLUSH_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(FLUSH_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = flushStream(streamHandle);
- ALOGV("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case REGISTER_AUDIO_THREAD: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) streamHandle failed!", __func__);
- return status;
- }
- status = data.readInt32(&tid);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) tid failed!", __func__);
- return status;
- }
- status = data.readInt64(&nanoseconds);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) nanoseconds failed!", __func__);
- return status;
- }
- result = registerAudioThread(streamHandle, tid, nanoseconds);
- ALOGV("BnAAudioService::%s(REGISTER_AUDIO_THREAD) 0x%08X, result = %d",
- __func__, streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case UNREGISTER_AUDIO_THREAD: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(UNREGISTER_AUDIO_THREAD) streamHandle failed!", __func__);
- return status;
- }
- status = data.readInt32(&tid);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(UNREGISTER_AUDIO_THREAD) tid failed!", __func__);
- return status;
- }
- result = unregisterAudioThread(streamHandle, tid);
- ALOGV("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- default:
- // ALOGW("BnAAudioService::onTransact not handled %u", code);
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
deleted file mode 100644
index 6bdb826..0000000
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AAUDIO_IAAUDIO_SERVICE_H
-#define ANDROID_AAUDIO_IAAUDIO_SERVICE_H
-
-#include <stdint.h>
-#include <utils/RefBase.h>
-#include <binder/TextOutput.h>
-#include <binder/IInterface.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioCommon.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/AAudioStreamConfiguration.h"
-#include "binding/AAudioStreamRequest.h"
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioClient.h"
-
-namespace android {
-
-#define AAUDIO_SERVICE_NAME "media.aaudio"
-
-// Interface (our AIDL) - service methods called by client
-class IAAudioService : public IInterface {
-public:
-
- DECLARE_META_INTERFACE(AAudioService);
-
- // Register an object to receive audio input/output change and track notifications.
- // For a given calling pid, AAudio service disregards any registrations after the first.
- // Thus the IAAudioClient must be a singleton per process.
- virtual void registerClient(const sp<IAAudioClient>& client) = 0;
-
- /**
- * @param request info needed to create the stream
- * @param configuration contains information about the created stream
- * @return handle to the stream or a negative error
- */
- virtual aaudio::aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
-
- virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /* Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
- virtual aaudio_result_t getStreamDescription(aaudio::aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) = 0;
-
- /**
- * Start the flow of data.
- * This is asynchronous. When complete, the service will send a STARTED event.
- */
- virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Stop the flow of data such that start() can resume without loss of data.
- * This is asynchronous. When complete, the service will send a PAUSED event.
- */
- virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Stop the flow of data such that the data currently in the buffer is played.
- * This is asynchronous. When complete, the service will send a STOPPED event.
- */
- virtual aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Discard any data held by the underlying HAL or Service.
- * This is asynchronous. When complete, the service will send a FLUSHED event.
- */
- virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Manage the specified thread as a low latency audio thread.
- */
- virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t periodNanoseconds) = 0;
-
- virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t clientThreadId) = 0;
-};
-
-class BnAAudioService : public BnInterface<IAAudioService> {
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags = 0);
-
-};
-
-} /* namespace android */
-
-#endif //ANDROID_AAUDIO_IAAUDIO_SERVICE_H
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index 4996b3f..a4b3cec 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -29,8 +29,29 @@
using namespace aaudio;
-RingBufferParcelable::RingBufferParcelable() {}
-RingBufferParcelable::~RingBufferParcelable() {}
+RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
+ : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
+ mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
+ mDataParcelable(std::move(parcelable.dataParcelable)),
+ mBytesPerFrame(parcelable.bytesPerFrame),
+ mFramesPerBurst(parcelable.framesPerBurst),
+ mCapacityInFrames(parcelable.capacityInFrames),
+ mFlags(static_cast<RingbufferFlags>(parcelable.flags)) {
+ static_assert(sizeof(mFlags) == sizeof(parcelable.flags));
+}
+
+RingBuffer RingBufferParcelable::parcelable() const {
+ RingBuffer result;
+ result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
+ result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
+ result.dataParcelable = std::move(mDataParcelable).parcelable();
+ result.bytesPerFrame = mBytesPerFrame;
+ result.framesPerBurst = mFramesPerBurst;
+ result.capacityInFrames = mCapacityInFrames;
+ static_assert(sizeof(mFlags) == sizeof(result.flags));
+ result.flags = static_cast<int32_t>(mFlags);
+ return result;
+}
// TODO This assumes that all three use the same SharedMemoryParcelable
void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
@@ -76,58 +97,6 @@
mCapacityInFrames = capacityInFrames;
}
-/**
- * The read and write must be symmetric.
- */
-status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mCapacityInFrames);
- if (status != NO_ERROR) goto error;
- if (mCapacityInFrames > 0) {
- status = parcel->writeInt32(mBytesPerFrame);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mFramesPerBurst);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mFlags);
- if (status != NO_ERROR) goto error;
- status = mReadCounterParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mWriteCounterParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDataParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- return NO_ERROR;
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mCapacityInFrames);
- if (status != NO_ERROR) goto error;
- if (mCapacityInFrames > 0) {
- status = parcel->readInt32(&mBytesPerFrame);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32(&mFramesPerBurst);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32((int32_t *)&mFlags);
- if (status != NO_ERROR) goto error;
- status = mReadCounterParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mWriteCounterParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDataParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- return AAudioConvert_aaudioToAndroidStatus(validate());
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
aaudio_result_t result;
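
The new constructor and parcelable() above pair a static_assert with a static_cast so the C++ RingbufferFlags enum and the AIDL int field cannot silently drift apart in size. A standalone sketch of that pattern with generic names (the real enum lives in AAudioServiceDefinitions.h):

#include <cstdint>
#include <iostream>
#include <type_traits>

// Illustrative flag enum; stand-in for RingbufferFlags.
enum class BufferFlags : int32_t {
    NONE = 0,
    RATE_ISOCHRONOUS = 1,
};

struct WireRingBuffer {
    int32_t flags = 0;  // AIDL side: plain int
};

int main() {
    BufferFlags native = BufferFlags::RATE_ISOCHRONOUS;

    // Fail the build if the two representations ever drift apart.
    static_assert(sizeof(BufferFlags) == sizeof(WireRingBuffer::flags));
    static_assert(std::is_same_v<std::underlying_type_t<BufferFlags>, int32_t>);

    WireRingBuffer wire;
    wire.flags = static_cast<int32_t>(native);                // native -> wire
    BufferFlags back = static_cast<BufferFlags>(wire.flags);  // wire -> native

    std::cout << "round-tripped flags = " << static_cast<int32_t>(back) << "\n";
    return 0;
}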
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 1dbcf07..2508cea 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -19,6 +19,7 @@
#include <stdint.h>
+#include <aaudio/RingBuffer.h>
#include <binder/Parcelable.h>
#include "binding/AAudioServiceDefinitions.h"
@@ -26,10 +27,12 @@
namespace aaudio {
-class RingBufferParcelable : public Parcelable {
+class RingBufferParcelable {
public:
- RingBufferParcelable();
- virtual ~RingBufferParcelable();
+ RingBufferParcelable() = default;
+
+ // Construct based on a parcelable representation.
+ explicit RingBufferParcelable(const RingBuffer& parcelable);
// TODO This assumes that all three use the same SharedMemoryParcelable
void setupMemory(int32_t sharedMemoryIndex,
@@ -57,21 +60,14 @@
bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
- /**
- * The read and write must be symmetric.
- */
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
void dump();
+ // Extract a parcelable representation of this object.
+ RingBuffer parcelable() const;
+
private:
-
- aaudio_result_t validate() const;
-
SharedRegionParcelable mReadCounterParcelable;
SharedRegionParcelable mWriteCounterParcelable;
SharedRegionParcelable mDataParcelable;
@@ -79,6 +75,8 @@
int32_t mFramesPerBurst = 0; // for ISOCHRONOUS queues
int32_t mCapacityInFrames = 0; // zero if unused
RingbufferFlags mFlags = RingbufferFlags::NONE;
+
+ aaudio_result_t validate() const;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index b6e8472..685b779 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
@@ -33,61 +34,36 @@
using android::base::unique_fd;
using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
+using android::media::SharedFileRegion;
using namespace aaudio;
-SharedMemoryParcelable::SharedMemoryParcelable() {}
-SharedMemoryParcelable::~SharedMemoryParcelable() {};
+SharedMemoryParcelable::SharedMemoryParcelable(SharedFileRegion&& parcelable) {
+ mFd = parcelable.fd.release();
+ mSizeInBytes = parcelable.size;
+ mOffsetInBytes = parcelable.offset;
+}
+
+SharedFileRegion SharedMemoryParcelable::parcelable() && {
+ SharedFileRegion result;
+ result.fd.reset(std::move(mFd));
+ result.size = mSizeInBytes;
+ result.offset = mOffsetInBytes;
+ return result;
+}
+
+SharedMemoryParcelable SharedMemoryParcelable::dup() const {
+ SharedMemoryParcelable result;
+ result.setup(mFd, static_cast<int32_t>(mSizeInBytes));
+ return result;
+}
void SharedMemoryParcelable::setup(const unique_fd& fd, int32_t sizeInBytes) {
- mFd.reset(dup(fd.get())); // store a duplicate fd
+ mFd.reset(::dup(fd.get())); // store a duplicate fd
ALOGV("setup(fd = %d -> %d, size = %d) this = %p\n", fd.get(), mFd.get(), sizeInBytes, this);
mSizeInBytes = sizeInBytes;
}
-status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) return status;
-
- status = parcel->writeInt32(mSizeInBytes);
- if (status != NO_ERROR) return status;
- if (mSizeInBytes > 0) {
- ALOGV("writeToParcel() mFd = %d, this = %p\n", mFd.get(), this);
- status = parcel->writeUniqueFileDescriptor(mFd);
- ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d",
- status);
- }
- return status;
-}
-
-status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mSizeInBytes);
- if (status != NO_ERROR) goto error;
-
- if (mSizeInBytes > 0) {
- // The Parcel owns the file descriptor and will close it later.
- unique_fd mmapFd;
- status = parcel->readUniqueFileDescriptor(&mmapFd);
- if (status != NO_ERROR) {
- ALOGE("readFromParcel() readUniqueFileDescriptor() failed : %d", status);
- goto error;
- }
-
- // Resolve the memory now while we still have the FD from the Parcel.
- // Closing the FD will not affect the shared memory once mmap() has been called.
- aaudio_result_t result = resolveSharedMemory(mmapFd);
- status = AAudioConvert_aaudioToAndroidStatus(result);
- if (status != NO_ERROR) goto error;
- }
-
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- return status;
-}
-
aaudio_result_t SharedMemoryParcelable::close() {
if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
int err = munmap(mResolvedAddress, mSizeInBytes);
@@ -104,7 +80,7 @@
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
- ALOGE("mmap() failed for fd = %d, nBytes = %d, errno = %s",
+ ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
fd.get(), mSizeInBytes, strerror(errno));
return AAUDIO_ERROR_INTERNAL;
}
@@ -118,7 +94,7 @@
return AAUDIO_ERROR_OUT_OF_RANGE;
} else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
ALOGE("out of range, offsetInBytes = %d, "
- "sizeInBytes = %d, mSizeInBytes = %d",
+ "sizeInBytes = %d, mSizeInBytes = %" PRId64,
offsetInBytes, sizeInBytes, mSizeInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -148,7 +124,11 @@
aaudio_result_t SharedMemoryParcelable::validate() const {
if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
- ALOGE("invalid mSizeInBytes = %d", mSizeInBytes);
+ ALOGE("invalid mSizeInBytes = %" PRId64, mSizeInBytes);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mOffsetInBytes != 0) {
+ ALOGE("invalid mOffsetInBytes = %" PRId64, mOffsetInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
return AAUDIO_OK;
@@ -156,5 +136,5 @@
void SharedMemoryParcelable::dump() {
ALOGD("mFd = %d", mFd.get());
- ALOGD("mSizeInBytes = %d", mSizeInBytes);
+ ALOGD("mSizeInBytes = %" PRId64, mSizeInBytes);
}
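
Two easy-to-miss details in this file: setup() must call ::dup() with explicit scope resolution now that the class has a dup() member of its own, and the size/offset fields widened to int64_t, which is why the log formats switched to PRId64. A small self-contained illustration of both points, using a toy class rather than the real parcelable:

#include <cinttypes>   // PRId64
#include <cstdint>
#include <cstdio>
#include <unistd.h>    // ::dup, ::close

// Toy class for illustration; the real SharedMemoryParcelable wraps android::base::unique_fd.
class FdHolder {
public:
    void setup(int fd) {
        // Unqualified dup(fd) would find the FdHolder::dup member (class scope hides the
        // global name) and fail to compile; the leading :: reaches POSIX dup(2) instead.
        mFd = ::dup(fd);
    }

    // Member that shadows the global dup() inside this class.
    int dup() const { return ::dup(mFd); }

    void dump() const {
        // int64_t needs the PRId64 macro for a portable printf format ("ld" vs "lld").
        std::printf("fd = %d, size = %" PRId64 " bytes\n", mFd, mSizeInBytes);
    }

    ~FdHolder() {
        if (mFd >= 0) ::close(mFd);
    }

private:
    int mFd = -1;
    int64_t mSizeInBytes = 0;
};

int main() {
    FdHolder holder;
    holder.setup(STDOUT_FILENO);
    holder.dump();
    int extra = holder.dup();   // duplicates the stored descriptor
    if (extra >= 0) ::close(extra);
    return 0;
}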
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 3927f58..1f2c335 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -21,8 +21,7 @@
#include <sys/mman.h>
#include <android-base/unique_fd.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/SharedFileRegion.h>
namespace aaudio {
@@ -36,10 +35,14 @@
* It may be divided into several regions.
* The memory can be shared using Binder or simply shared between threads.
*/
-class SharedMemoryParcelable : public android::Parcelable {
+class SharedMemoryParcelable {
public:
- SharedMemoryParcelable();
- virtual ~SharedMemoryParcelable();
+ SharedMemoryParcelable() = default;
+
+ // Ctor from a parcelable representation.
+ // Since the parcelable object owns a unique FD, move semantics are provided to avoid the need
+ // to dupe.
+ explicit SharedMemoryParcelable(android::media::SharedFileRegion&& parcelable);
/**
* Make a dup() of the fd and store it for later use.
@@ -49,10 +52,6 @@
*/
void setup(const android::base::unique_fd& fd, int32_t sizeInBytes);
- virtual android::status_t writeToParcel(android::Parcel* parcel) const override;
-
- virtual android::status_t readFromParcel(const android::Parcel* parcel) override;
-
// mmap() shared memory
aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
@@ -63,20 +62,23 @@
void dump();
-protected:
+ // Extract a parcelable representation of this object.
+ // Since we own a unique FD, move semantics are provided to avoid the need to dupe.
+ android::media::SharedFileRegion parcelable() &&;
-#define MMAP_UNRESOLVED_ADDRESS reinterpret_cast<uint8_t*>(MAP_FAILED)
-
- aaudio_result_t resolveSharedMemory(const android::base::unique_fd& fd);
-
- android::base::unique_fd mFd;
- int32_t mSizeInBytes = 0;
- uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+ // Copy this instance. Duplicates the underlying FD.
+ SharedMemoryParcelable dup() const;
private:
+#define MMAP_UNRESOLVED_ADDRESS reinterpret_cast<uint8_t*>(MAP_FAILED)
+ android::base::unique_fd mFd;
+ int64_t mSizeInBytes = 0;
+ int64_t mOffsetInBytes = 0;
+ uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+
+ aaudio_result_t resolveSharedMemory(const android::base::unique_fd& fd);
aaudio_result_t validate() const;
-
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index c776116..56b99c0 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -36,8 +36,18 @@
using namespace aaudio;
-SharedRegionParcelable::SharedRegionParcelable() {}
-SharedRegionParcelable::~SharedRegionParcelable() {}
+SharedRegionParcelable::SharedRegionParcelable(const SharedRegion& parcelable)
+ : mSharedMemoryIndex(parcelable.sharedMemoryIndex),
+ mOffsetInBytes(parcelable.offsetInBytes),
+ mSizeInBytes(parcelable.sizeInBytes) {}
+
+SharedRegion SharedRegionParcelable::parcelable() const {
+ SharedRegion result;
+ result.sharedMemoryIndex = mSharedMemoryIndex;
+ result.offsetInBytes = mOffsetInBytes;
+ result.sizeInBytes = mSizeInBytes;
+ return result;
+}
void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
int32_t offsetInBytes,
@@ -47,41 +57,6 @@
mSizeInBytes = sizeInBytes;
}
-status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mSizeInBytes);
- if (status != NO_ERROR) goto error;
- if (mSizeInBytes > 0) {
- status = parcel->writeInt32(mSharedMemoryIndex);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mOffsetInBytes);
- if (status != NO_ERROR) goto error;
- }
- return NO_ERROR;
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mSizeInBytes);
- if (status != NO_ERROR) goto error;
- if (mSizeInBytes > 0) {
- status = parcel->readInt32(&mSharedMemoryIndex);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32(&mOffsetInBytes);
- if (status != NO_ERROR) goto error;
- }
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
void **regionAddressPtr) {
if (mSizeInBytes == 0) {
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
index 0cd8c04..c15fc30 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -20,41 +20,39 @@
#include <stdint.h>
#include <sys/mman.h>
-#include <binder/Parcelable.h>
#include <aaudio/AAudio.h>
+#include <aaudio/SharedRegion.h>
#include "binding/SharedMemoryParcelable.h"
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
namespace aaudio {
-class SharedRegionParcelable : public Parcelable {
+class SharedRegionParcelable {
public:
- SharedRegionParcelable();
- virtual ~SharedRegionParcelable();
+ SharedRegionParcelable() = default;
+
+ // Construct based on a parcelable representation.
+ explicit SharedRegionParcelable(const SharedRegion& parcelable);
void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
void dump();
-protected:
+ // Extract a parcelable representation of this object.
+ SharedRegion parcelable() const;
+
+private:
int32_t mSharedMemoryIndex = -1;
int32_t mOffsetInBytes = 0;
int32_t mSizeInBytes = 0;
-private:
aaudio_result_t validate() const;
};
diff --git a/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl b/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl
new file mode 100644
index 0000000..3600b6a
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.RingBuffer;
+import android.media.SharedFileRegion;
+
+parcelable Endpoint {
+ // Set capacityInFrames to zero if Queue is unused.
+ RingBuffer upMessageQueueParcelable; // server to client
+ RingBuffer downMessageQueueParcelable; // to server
+ RingBuffer upDataQueueParcelable; // e.g. record, could share same queue
+ RingBuffer downDataQueueParcelable; // e.g. playback
+ SharedFileRegion[] sharedMemories;
+}
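
For orientation, the AIDL compiler turns this declaration into an ordinary C++ parcelable with public fields plus readFromParcel()/writeToParcel(). The sketch below approximates the generated shape; it is not the actual emitted header and details may differ.

// Approximate shape of the code generated from Endpoint.aidl (an assumption, not the real header).
#include <vector>

#include <aaudio/RingBuffer.h>
#include <android/media/SharedFileRegion.h>
#include <binder/Parcelable.h>

namespace aaudio {

class Endpoint : public ::android::Parcelable {
public:
    RingBuffer upMessageQueueParcelable;
    RingBuffer downMessageQueueParcelable;
    RingBuffer upDataQueueParcelable;
    RingBuffer downDataQueueParcelable;
    std::vector<::android::media::SharedFileRegion> sharedMemories;

    ::android::status_t readFromParcel(const ::android::Parcel* parcel) override;
    ::android::status_t writeToParcel(::android::Parcel* parcel) const override;
};

}  // namespace aaudio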
diff --git a/media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl
new file mode 100644
index 0000000..a010dbc
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+interface IAAudioClient {
+ oneway void onStreamChange(int handle, int opcode, int value);
+}
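
This replaces the hand-written BpAAudioClient/BnAAudioClient deleted above; the stub and proxy are now emitted by the AIDL compiler. A rough sketch of a server-side implementation against the generated stub (standard AIDL C++ backend assumed; exact signatures may differ):

// Sketch only; assumes the headers generated from IAAudioClient.aidl.
#define LOG_TAG "MyAAudioClient"
#include <utils/Log.h>

#include <aaudio/BnAAudioClient.h>

class MyAAudioClient : public aaudio::BnAAudioClient {
public:
    // Generated oneway methods still return binder::Status even though no reply is sent.
    android::binder::Status onStreamChange(int32_t handle, int32_t opcode,
                                           int32_t value) override {
        ALOGD("onStreamChange(0x%08X, %d, %d)", handle, opcode, value);
        return android::binder::Status::ok();
    }
};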
diff --git a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
new file mode 100644
index 0000000..44d2211
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.Endpoint;
+import aaudio.IAAudioClient;
+import aaudio.StreamParameters;
+import aaudio.StreamRequest;
+
+interface IAAudioService {
+ /**
+ * Register an object to receive audio input/output change and track notifications.
+ * For a given calling pid, AAudio service disregards any registrations after the first.
+ * Thus the IAAudioClient must be a singleton per process.
+ */
+ void registerClient(IAAudioClient client);
+
+ /**
+ * @param request info needed to create the stream
+ * @param paramsOut contains information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ int openStream(in StreamRequest request,
+ out StreamParameters paramsOut);
+
+ int closeStream(int streamHandle);
+
+ /*
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ int getStreamDescription(int streamHandle, out Endpoint endpoint);
+
+ /**
+ * Start the flow of data.
+ * This is asynchronous. When complete, the service will send a STARTED event.
+ */
+ int startStream(int streamHandle);
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ * This is asynchronous. When complete, the service will send a PAUSED event.
+ */
+ int pauseStream(int streamHandle);
+
+ /**
+ * Stop the flow of data such that the data currently in the buffer is played.
+ * This is asynchronous. When complete, the service will send a STOPPED event.
+ */
+ int stopStream(int streamHandle);
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ * This is asynchronous. When complete, the service will send a FLUSHED event.
+ */
+ int flushStream(int streamHandle);
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ */
+ int registerAudioThread(int streamHandle,
+ int clientThreadId,
+ long periodNanoseconds);
+
+ int unregisterAudioThread(int streamHandle,
+ int clientThreadId);
+}
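
On the client side, calls go through the generated proxy: the AIDL int return value surfaces as a trailing out-parameter and transport failures arrive in binder::Status. A hedged sketch of a call site (the real wrapper is AAudioBinderClient; the function and its error handling here are illustrative):

// Sketch only; assumes the headers generated from IAAudioService.aidl.
#include <aaudio/IAAudioService.h>
#include <aaudio/StreamParameters.h>
#include <aaudio/StreamRequest.h>
#include <binder/Status.h>
#include <utils/StrongPointer.h>

using android::sp;
using android::binder::Status;

int32_t openStreamExample(const sp<aaudio::IAAudioService>& service,
                          const aaudio::StreamRequest& request) {
    aaudio::StreamParameters paramsOut;
    int32_t handle = -1;  // the AIDL "int" return arrives via the trailing out-parameter
    Status status = service->openStream(request, &paramsOut, &handle);
    if (!status.isOk()) {
        return -1;  // transport or service error; real code maps this to an aaudio_result_t
    }
    return handle;  // negative values are AAudio error codes, per the interface doc
}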
diff --git a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
new file mode 100644
index 0000000..a58b33a
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.SharedRegion;
+
+parcelable RingBuffer {
+ SharedRegion readCounterParcelable;
+ SharedRegion writeCounterParcelable;
+ SharedRegion dataParcelable;
+ int bytesPerFrame; // index is in frames
+ int framesPerBurst; // for ISOCHRONOUS queues
+ int capacityInFrames; // zero if unused
+ int /* RingbufferFlags */ flags; // = RingbufferFlags::NONE;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl b/media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl
new file mode 100644
index 0000000..26153e8
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+parcelable SharedRegion {
+ int sharedMemoryIndex;
+ int offsetInBytes;
+ int sizeInBytes;
+}
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
new file mode 100644
index 0000000..b7c4f70
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import android.media.audio.common.AudioFormat;
+
+parcelable StreamParameters {
+ int samplesPerFrame; // = AAUDIO_UNSPECIFIED;
+ int sampleRate; // = AAUDIO_UNSPECIFIED;
+ int deviceId; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_sharing_mode_t */ sharingMode; // = AAUDIO_SHARING_MODE_SHARED;
+ AudioFormat audioFormat; // = AUDIO_FORMAT_DEFAULT;
+ int /* aaudio_direction_t */ direction; // = AAUDIO_DIRECTION_OUTPUT;
+ int /* aaudio_usage_t */ usage; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_content_type_t */ contentType; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_input_preset_t */ inputPreset; // = AAUDIO_UNSPECIFIED;
+ int bufferCapacity; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_session_id_t */ sessionId; // = AAUDIO_SESSION_ID_NONE;
+ boolean isPrivacySensitive; // = false;
+}
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
new file mode 100644
index 0000000..9bf4077
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.StreamParameters;
+
+parcelable StreamRequest {
+ StreamParameters params;
+ int userId; // = (uid_t) -1;
+ int processId; // = (pid_t) -1;
+ boolean sharingModeMatchRequired; // = false;
+ boolean inService; // = false; // Stream opened by the AAudio service
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index ac7ad9a..2688597 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -34,7 +34,6 @@
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
-#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 63be978..162f098 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -20,7 +20,6 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
-#include "binding/IAAudioService.h"
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceInterface.h"
#include "client/IsochronousClockModel.h"
@@ -29,7 +28,6 @@
#include "utility/AudioClock.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 1d65d87..251a7f2 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -23,7 +23,6 @@
#include "client/AudioStreamInternal.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index b47b472..980592c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -32,6 +32,7 @@
#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
: "AudioStreamInternalPlay_Client")
+using android::status_t;
using android::WrappingBuffer;
using namespace aaudio;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index be95da6..7b1cddc 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -25,7 +25,6 @@
#include "client/AudioStreamInternal.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 43b63d6..a8ae0fb 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -118,6 +118,7 @@
setDeviceFormat(getFormat());
}
+ // To avoid glitching, let AudioFlinger pick the optimal burst size.
uint32_t notificationFrames = 0;
// Setup the callback if there is one.
@@ -128,7 +129,6 @@
streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
callback = getLegacyCallback();
callbackData = this;
- notificationFrames = builder.getFramesPerDataCallback();
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 9e826bd..4ba08fd 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -96,6 +96,7 @@
size_t frameCount = (size_t)builder.getBufferCapacity();
+ // To avoid glitching, let AudioFlinger pick the optimal burst size.
int32_t notificationFrames = 0;
const audio_format_t format = (getFormat() == AUDIO_FORMAT_DEFAULT)
@@ -118,8 +119,6 @@
// Take advantage of a special trick that allows us to create a buffer
// that is some multiple of the burst size.
notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
- } else {
- notificationFrames = builder.getFramesPerDataCallback();
}
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
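
Both legacy streams now follow the same notificationFrames convention: 0 lets AudioFlinger choose the callback interval (the anti-glitch default added here), while the negative value used above requests a buffer capacity of that many bursts. A purely illustrative helper encoding that convention; the constant value below is an assumption, not the real DEFAULT_BURSTS_PER_BUFFER_CAPACITY:

#include <cstdint>

// Illustrative constant; the real value lives in the AAudio legacy stream code.
constexpr int32_t kDefaultBurstsPerBufferCapacity = 2;  // assumption for this sketch

// Encodes the notificationFrames convention visible in the patch:
//   0   -> let AudioFlinger choose the burst/callback size (avoids glitches)
//   < 0 -> request a buffer capacity of |value| bursts
int32_t chooseNotificationFrames(bool useCallback, bool wantBurstMultipleBuffer) {
    if (useCallback && wantBurstMultipleBuffer) {
        return 0 - kDefaultBurstsPerBufferCapacity;
    }
    return 0;  // default after this patch: no explicit callback-size request
}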
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 8935d57..95d6543 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -11,10 +11,12 @@
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_marshalling.cpp"],
shared_libs: [
+ "aaudio-aidl-cpp",
"libaaudio_internal",
"libbinder",
"libcutils",
"libutils",
+ "shared-file-region-aidl-unstable-cpp",
],
}
diff --git a/media/libaaudio/tests/test_marshalling.cpp b/media/libaaudio/tests/test_marshalling.cpp
index c51fbce..49213dc 100644
--- a/media/libaaudio/tests/test_marshalling.cpp
+++ b/media/libaaudio/tests/test_marshalling.cpp
@@ -33,6 +33,29 @@
using namespace android;
using namespace aaudio;
+template<typename T>
+T copy(const T& object) {
+ return T(object);
+}
+
+template<>
+SharedMemoryParcelable copy<SharedMemoryParcelable>(const SharedMemoryParcelable& object) {
+ return object.dup();
+}
+
+template<typename T>
+void writeToParcel(const T& object, Parcel* parcel) {
+ copy(object).parcelable().writeToParcel(parcel);
+}
+
+template<typename T>
+T readFromParcel(const Parcel& parcel) {
+ using ParcelType = std::decay_t<decltype(std::declval<T>().parcelable())>;
+ ParcelType parcelable;
+ parcelable.readFromParcel(&parcel);
+ return T(std::move(parcelable));
+}
+
// Test adding one value.
TEST(test_marshalling, aaudio_one_read_write) {
Parcel parcel;
@@ -48,7 +71,6 @@
// Test SharedMemoryParcel.
TEST(test_marshalling, aaudio_shared_memory) {
SharedMemoryParcelable sharedMemoryA;
- SharedMemoryParcelable sharedMemoryB;
const size_t memSizeBytes = 840;
unique_fd fd(ashmem_create_region("TestMarshalling", memSizeBytes));
ASSERT_LE(0, fd);
@@ -63,10 +85,10 @@
Parcel parcel;
size_t pos = parcel.dataPosition();
- sharedMemoryA.writeToParcel(&parcel);
+ writeToParcel(sharedMemoryA, &parcel);
parcel.setDataPosition(pos);
- sharedMemoryB.readFromParcel(&parcel);
+ SharedMemoryParcelable sharedMemoryB = readFromParcel<SharedMemoryParcelable>(parcel);
EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
// should see same value at two different addresses
@@ -81,7 +103,6 @@
TEST(test_marshalling, aaudio_shared_region) {
SharedMemoryParcelable sharedMemories[2];
SharedRegionParcelable sharedRegionA;
- SharedRegionParcelable sharedRegionB;
const size_t memSizeBytes = 840;
unique_fd fd(ashmem_create_region("TestMarshalling", memSizeBytes));
ASSERT_LE(0, fd);
@@ -97,10 +118,10 @@
Parcel parcel;
size_t pos = parcel.dataPosition();
- sharedRegionA.writeToParcel(&parcel);
+ writeToParcel(sharedRegionA, &parcel);
parcel.setDataPosition(pos);
- sharedRegionB.readFromParcel(&parcel);
+ SharedRegionParcelable sharedRegionB = readFromParcel<SharedRegionParcelable>(parcel);
// should see same value
void *region2;
@@ -113,7 +134,6 @@
TEST(test_marshalling, aaudio_ring_buffer_parcelable) {
SharedMemoryParcelable sharedMemories[2];
RingBufferParcelable ringBufferA;
- RingBufferParcelable ringBufferB;
const size_t bytesPerFrame = 8;
const size_t framesPerBurst = 32;
@@ -147,11 +167,11 @@
// write A to parcel
Parcel parcel;
size_t pos = parcel.dataPosition();
- ringBufferA.writeToParcel(&parcel);
+ writeToParcel(ringBufferA, &parcel);
// read B from parcel
parcel.setDataPosition(pos);
- ringBufferB.readFromParcel(&parcel);
+ RingBufferParcelable ringBufferB = readFromParcel<RingBufferParcelable>(parcel);
RingBufferDescriptor descriptorB;
EXPECT_EQ(AAUDIO_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
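The copy/writeToParcel/readFromParcel templates above let every test use the same round-trip path even though SharedMemoryParcelable can only be duplicated via dup(). A hedged sketch of how that round trip could be factored into one helper for these tests (the roundTrip name is illustrative and not part of the patch; it assumes the Parcel and gtest APIs already used in this file):

// Hypothetical helper: round-trip any of the wrappers above through a Parcel
// using the writeToParcel/readFromParcel templates defined in this test.
template <typename T>
T roundTrip(const T& object) {
    android::Parcel parcel;
    size_t pos = parcel.dataPosition();   // remember where writing starts
    writeToParcel(object, &parcel);       // serialize a copy (or dup) of the object
    parcel.setDataPosition(pos);          // rewind so we read back what we wrote
    return readFromParcel<T>(parcel);     // deserialize into a fresh instance
}

// Usage inside a test body (sketch):
//   SharedMemoryParcelable sharedMemoryB = roundTrip(sharedMemoryA);
//   EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());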
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 2a1e56c..d7e9461 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -118,14 +118,14 @@
export_header_lib_headers: ["libaudioclient_headers"],
export_static_lib_headers: [
"effect-aidl-cpp",
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
static_libs: [
"effect-aidl-cpp",
// for memory heap analysis
"libc_malloc_debug_backtrace",
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
cflags: [
"-Wall",
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 509e063..d6671e3 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -742,6 +742,8 @@
void *iMemPointer;
audio_track_cblk_t* cblk;
status_t status;
+ static const int32_t kMaxCreateAttempts = 3;
+ int32_t remainingAttempts = kMaxCreateAttempts;
if (audioFlinger == 0) {
ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
@@ -803,15 +805,24 @@
input.sessionId = mSessionId;
originalSessionId = mSessionId;
- record = audioFlinger->createRecord(input,
- output,
- &status);
+ do {
+ record = audioFlinger->createRecord(input, output, &status);
+ if (status == NO_ERROR) {
+ break;
+ }
+ if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
+ ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
+ __func__, mPortId, status);
+ goto exit;
+ }
+ // FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
+ // between the audio policy manager and audio flinger during the input stream open
+ // sequence, and can be recovered by retrying.
+ // Leave time for the race condition to clear before retrying, and randomize the delay
+ // to reduce the probability of concurrent clients retrying in lockstep.
+ usleep((20 + rand() % 30) * 10000);
+ } while (1);
- if (status != NO_ERROR) {
- ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
- __func__, mPortId, status);
- goto exit;
- }
ALOG_ASSERT(record != 0);
// AudioFlinger now owns the reference to the I/O handle,
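The retry loop above caps the attempts at three and sleeps a randomized 200-490 ms between them so concurrent clients spread out instead of hammering AudioFlinger together. The same pattern in a minimal, self-contained standard C++ form (retryWithJitter is an illustrative name, not AudioRecord code):

#include <chrono>
#include <functional>
#include <random>
#include <thread>

// Retry an operation a bounded number of times, sleeping a randomized
// 200-490 ms between attempts so concurrent callers do not retry in lockstep.
template <typename Status>
Status retryWithJitter(const std::function<Status()>& op,
                       const std::function<bool(Status)>& isRetryable,
                       int maxAttempts = 3) {
    std::mt19937 rng{std::random_device{}()};
    std::uniform_int_distribution<int> jitterMs(200, 490);
    Status status{};
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        status = op();
        if (!isRetryable(status) || attempt == maxAttempts) {
            break;  // success, non-retryable failure, or attempts exhausted
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(jitterMs(rng)));
    }
    return status;  // last status observed
}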
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 49c4bc0..edb0889 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -47,8 +47,9 @@
record_config_callback AudioSystem::gRecordConfigCallback = NULL;
// Required to be held while calling into gSoundTriggerCaptureStateListener.
+class CaptureStateListenerImpl;
Mutex gSoundTriggerCaptureStateListenerLock;
-sp<AudioSystem::CaptureStateListener> gSoundTriggerCaptureStateListener = nullptr;
+sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -1634,45 +1635,110 @@
return aps->getDevicesForRoleAndStrategy(strategy, role, devices);
}
+status_t AudioSystem::setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->setDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->addDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->removeDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->clearDevicesRoleForCapturePreset(audioSource, role);
+}
+
+status_t AudioSystem::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+}
+
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
public IBinder::DeathRecipient {
public:
+ CaptureStateListenerImpl(
+ const sp<IAudioPolicyService>& aps,
+ const sp<AudioSystem::CaptureStateListener>& listener)
+ : mAps(aps), mListener(listener) {}
+
+ void init() {
+ bool active;
+ status_t status = mAps->registerSoundTriggerCaptureStateListener(this, &active);
+ if (status != NO_ERROR) {
+ mListener->onServiceDied();
+ return;
+ }
+ mListener->onStateChanged(active);
+ IInterface::asBinder(mAps)->linkToDeath(this);
+ }
+
binder::Status setCaptureState(bool active) override {
Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
- gSoundTriggerCaptureStateListener->onStateChanged(active);
+ mListener->onStateChanged(active);
return binder::Status::ok();
}
void binderDied(const wp<IBinder>&) override {
Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
- gSoundTriggerCaptureStateListener->onServiceDied();
+ mListener->onServiceDied();
gSoundTriggerCaptureStateListener = nullptr;
}
+
+private:
+ // Need this in order to keep the death recipient alive.
+ sp<IAudioPolicyService> mAps;
+ sp<AudioSystem::CaptureStateListener> mListener;
};
status_t AudioSystem::registerSoundTriggerCaptureStateListener(
const sp<CaptureStateListener>& listener) {
+ LOG_ALWAYS_FATAL_IF(listener == nullptr);
+
const sp<IAudioPolicyService>& aps =
AudioSystem::get_audio_policy_service();
if (aps == 0) {
return PERMISSION_DENIED;
}
- sp<CaptureStateListenerImpl> wrapper = new CaptureStateListenerImpl();
-
Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
+ gSoundTriggerCaptureStateListener = new CaptureStateListenerImpl(aps, listener);
+ gSoundTriggerCaptureStateListener->init();
- bool active;
- status_t status =
- aps->registerSoundTriggerCaptureStateListener(wrapper, &active);
- if (status != NO_ERROR) {
- listener->onServiceDied();
- return NO_ERROR;
- }
- gSoundTriggerCaptureStateListener = listener;
- listener->onStateChanged(active);
- sp<IBinder> binder = IInterface::asBinder(aps);
- binder->linkToDeath(wrapper);
return NO_ERROR;
}
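The rework above replaces a bare global listener with a single CaptureStateListenerImpl that owns strong references to both the policy-service proxy and the client listener, so the death recipient and its callbacks stay valid for as long as the registration exists. A reduced sketch of that ownership pattern (class and method names here are illustrative; it assumes the libbinder sp<>/DeathRecipient API already used above):

// Sketch: the wrapper registered as the death recipient holds strong
// references to everything its callbacks will need.
class ServiceEventForwarder : public IBinder::DeathRecipient {
public:
    ServiceEventForwarder(const sp<IAudioPolicyService>& service,
                          const sp<AudioSystem::CaptureStateListener>& listener)
        : mService(service), mListener(listener) {}

    void attach() {
        // Keeping mService alive keeps the proxy (and the link-to-death
        // registration on it) valid for the lifetime of this wrapper.
        IInterface::asBinder(mService)->linkToDeath(this);
    }

    void binderDied(const wp<IBinder>& /*who*/) override {
        mListener->onServiceDied();
    }

private:
    const sp<IAudioPolicyService> mService;
    const sp<AudioSystem::CaptureStateListener> mListener;
};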
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 807aa13..41af78c 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -210,7 +210,11 @@
return NO_ERROR;
}
-AudioTrack::AudioTrack()
+AudioTrack::AudioTrack() : AudioTrack("" /*opPackageName*/)
+{
+}
+
+AudioTrack::AudioTrack(const std::string& opPackageName)
: mStatus(NO_INIT),
mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -218,6 +222,7 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mOpPackageName(opPackageName),
mAudioTrackCallback(new AudioTrackCallback())
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
@@ -244,12 +249,14 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ const std::string& opPackageName)
: mStatus(NO_INIT),
mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
+ mOpPackageName(opPackageName),
mAudioTrackCallback(new AudioTrackCallback())
{
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -277,13 +284,15 @@
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
- float maxRequiredSpeed)
+ float maxRequiredSpeed,
+ const std::string& opPackageName)
: mStatus(NO_INIT),
mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mOpPackageName(opPackageName),
mAudioTrackCallback(new AudioTrackCallback())
{
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -1555,6 +1564,7 @@
input.selectedDeviceId = mSelectedDeviceId;
input.sessionId = mSessionId;
input.audioTrackCallback = mAudioTrackCallback;
+ input.opPackageName = mOpPackageName;
IAudioFlinger::CreateTrackOutput output;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 225713a..b4e07e0 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,6 +24,7 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
+#include <media/IAudioPolicyService.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/TimeCheck.h>
#include "IAudioFlinger.h"
@@ -1024,6 +1025,16 @@
std::string tag("IAudioFlinger command " + std::to_string(code));
TimeCheck check(tag.c_str());
+ // Make sure we connect to Audio Policy Service before calling into AudioFlinger:
+ // - AudioFlinger can call into Audio Policy Service with its global mutex held
+ // - If this is the first time Audio Policy Service is queried from inside the audioserver
+ // process, this will trigger Audio Policy Manager initialization.
+ // - Audio Policy Manager initialization calls into AudioFlinger, which will try to lock
+ // its global mutex, and a deadlock will occur.
+ if (IPCThreadState::self()->getCallingPid() != getpid()) {
+ AudioSystem::get_audio_policy_service();
+ }
+
switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
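The onTransact guard above pre-resolves the Audio Policy Service before any incoming AudioFlinger binder call, so lazy policy-manager initialization never happens while AudioFlinger's global mutex is held. The same idea in a generic, self-contained form (ServiceA/ServiceB are hypothetical; nothing here is the actual AudioFlinger code):

#include <mutex>

// Hypothetical services where A's lazy initialization calls back into B.
struct ServiceA {
    void ensureConnected() { /* connect + lazy init; may call into ServiceB */ }
};

struct ServiceB {
    ServiceA& dependency;
    std::mutex globalLock;

    void handleRequest() {
        // Resolve the dependency *before* taking our own lock. If we connected
        // lazily inside the locked section, A's init could call back into B,
        // try to take globalLock again, and deadlock.
        dependency.ensureConnected();

        std::lock_guard<std::mutex> lock(globalLock);
        // ... serve the request with the lock held ...
    }
};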
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 1491afe..9d3212b 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -119,6 +119,11 @@
AUDIO_MODULES_UPDATED, // oneway
SET_CURRENT_IME_UID,
REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER,
+ SET_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -1408,6 +1413,95 @@
return static_cast<status_t>(reply.readInt32());
}
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(SET_DEVICES_ROLE_FOR_CAPTURE_PRESET, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t clearDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = reply.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
virtual status_t getDevicesForAttributes(const AudioAttributes &aa,
AudioDeviceTypeAddrVector *devices) const
{
@@ -1544,7 +1638,12 @@
case SET_ALLOWED_CAPTURE_POLICY:
case AUDIO_MODULES_UPDATED:
case SET_CURRENT_IME_UID:
- case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER: {
+ case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER:
+ case SET_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -2729,6 +2828,71 @@
return NO_ERROR;
} break;
+ case SET_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = setDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = addDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = removeDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ status_t status = clearDevicesRoleForCapturePreset(audioSource, role);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+ status_t marshall_status = reply->writeParcelableVector(devices);
+ if (marshall_status != NO_ERROR) {
+ return marshall_status;
+ }
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
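Together with the AudioSystem wrappers added earlier in this patch, the new transactions let a privileged client pin a capture preset to specific devices; the onTransact check above restricts them to service UIDs. A hedged usage sketch follows: the DEVICE_ROLE_PREFERRED constant, the AUDIO_DEVICE_IN_REMOTE_SUBMIX type/address pair, and the AudioDeviceTypeAddr(type, address) constructor are assumptions from the platform headers, not part of this diff.

// Illustrative only: prefer a specific remote-submix input for voice recognition.
AudioDeviceTypeAddrVector devices = {
        AudioDeviceTypeAddr(AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0")};

status_t status = AudioSystem::setDevicesRoleForCapturePreset(
        AUDIO_SOURCE_VOICE_RECOGNITION, DEVICE_ROLE_PREFERRED, devices);

if (status == NO_ERROR) {
    // Read back what the policy manager currently has for that preset/role.
    AudioDeviceTypeAddrVector current;
    AudioSystem::getDevicesForRoleAndCapturePreset(
            AUDIO_SOURCE_VOICE_RECOGNITION, DEVICE_ROLE_PREFERRED, current);

    // Later, restore the default routing for that preset.
    AudioSystem::clearDevicesRoleForCapturePreset(
            AUDIO_SOURCE_VOICE_RECOGNITION, DEVICE_ROLE_PREFERRED);
}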
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 09025d1..848743a 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -433,6 +433,22 @@
static status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
device_role_t role, AudioDeviceTypeAddrVector &devices);
+ static status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices);
+
+ static status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices);
+
+ static status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ static status_t clearDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role);
+
+ static status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices);
+
static status_t getDeviceForStrategy(product_strategy_t strategy,
AudioDeviceTypeAddr &device);
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 0dbd842..a9946da 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -26,6 +26,8 @@
#include <media/Modulo.h>
#include <utils/threads.h>
+#include <string>
+
#include "android/media/BnAudioTrackCallback.h"
#include "android/media/IAudioTrackCallback.h"
@@ -177,6 +179,8 @@
*/
AudioTrack();
+ AudioTrack(const std::string& opPackageName);
+
/* Creates an AudioTrack object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
* Unspecified values are set to appropriate default values.
@@ -258,7 +262,8 @@
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ const std::string& opPackageName = "");
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
@@ -288,7 +293,8 @@
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
+ float maxRequiredSpeed = 1.0f,
+ const std::string& opPackageName = "");
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
@@ -1236,6 +1242,8 @@
sp<media::VolumeHandler> mVolumeHandler;
+ const std::string mOpPackageName;
+
private:
class DeathNotifier : public IBinder::DeathRecipient {
public:
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index b950d0f..a01b681 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -35,6 +35,7 @@
#include <system/audio_policy.h>
#include <utils/String8.h>
#include <media/MicrophoneInfo.h>
+#include <string>
#include <vector>
#include "android/media/IAudioRecord.h"
@@ -85,6 +86,11 @@
speed = parcel->readFloat();
audioTrackCallback = interface_cast<media::IAudioTrackCallback>(
parcel->readStrongBinder());
+ const char* opPackageNamePtr = parcel->readCString();
+ if (opPackageNamePtr == nullptr) {
+ return FAILED_TRANSACTION;
+ }
+ opPackageName = opPackageNamePtr;
/* input/output arguments*/
(void)parcel->read(&flags, sizeof(audio_output_flags_t));
@@ -109,6 +115,7 @@
(void)parcel->writeInt32(notificationsPerBuffer);
(void)parcel->writeFloat(speed);
(void)parcel->writeStrongBinder(IInterface::asBinder(audioTrackCallback));
+ (void)parcel->writeCString(opPackageName.c_str());
/* input/output arguments*/
(void)parcel->write(&flags, sizeof(audio_output_flags_t));
@@ -127,6 +134,7 @@
uint32_t notificationsPerBuffer;
float speed;
sp<media::IAudioTrackCallback> audioTrackCallback;
+ std::string opPackageName;
/* input/output */
audio_output_flags_t flags;
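The opPackageName field is carried as a C string, and readCString() returns nullptr when the parcel is malformed or truncated, which is why the read side above maps that to FAILED_TRANSACTION. A small sketch of the same write/read pattern in isolation (helper names are illustrative; only the Parcel writeCString/readCString calls used above are assumed):

#include <binder/Parcel.h>
#include <string>

using namespace android;

// Write a std::string as a C string, and defend against a missing or
// truncated parcel on the read side.
status_t writeName(const std::string& name, Parcel* parcel) {
    return parcel->writeCString(name.c_str());
}

status_t readName(const Parcel& parcel, std::string* name) {
    const char* s = parcel.readCString();
    if (s == nullptr) {               // malformed or truncated parcel
        return FAILED_TRANSACTION;
    }
    *name = s;
    return NO_ERROR;
}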
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index afb0fda..2d5f687 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -252,6 +252,25 @@
device_role_t role,
AudioDeviceTypeAddrVector &devices) = 0;
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices) = 0;
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
+
// The return code here is only intended to represent transport errors. The
// actual server implementation should always return NO_ERROR.
virtual status_t registerSoundTriggerCaptureStateListener(
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index da2e109..d8fce38 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -19,6 +19,7 @@
#include <arpa/inet.h>
#include <iostream>
#include <regex>
+#include <set>
#include <sstream>
namespace android {
@@ -80,6 +81,14 @@
return false;
}
+bool AudioDeviceTypeAddr::operator==(const AudioDeviceTypeAddr &rhs) const {
+ return equals(rhs);
+}
+
+bool AudioDeviceTypeAddr::operator!=(const AudioDeviceTypeAddr &rhs) const {
+ return !operator==(rhs);
+}
+
void AudioDeviceTypeAddr::reset() {
mType = AUDIO_DEVICE_NONE;
setAddress("");
@@ -118,6 +127,20 @@
return deviceTypes;
}
+AudioDeviceTypeAddrVector excludeDeviceTypeAddrsFrom(
+ const AudioDeviceTypeAddrVector& devices,
+ const AudioDeviceTypeAddrVector& devicesToExclude) {
+ std::set<AudioDeviceTypeAddr> devicesToExcludeSet(
+ devicesToExclude.begin(), devicesToExclude.end());
+ AudioDeviceTypeAddrVector remainedDevices;
+ for (const auto& device : devices) {
+ if (devicesToExcludeSet.count(device) == 0) {
+ remainedDevices.push_back(device);
+ }
+ }
+ return remainedDevices;
+}
+
std::string dumpAudioDeviceTypeAddrVector(const AudioDeviceTypeAddrVector& deviceTypeAddrs,
bool includeSensitiveInfo) {
std::stringstream stream;
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 3e03df7..7497faf 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -47,6 +47,10 @@
bool operator<(const AudioDeviceTypeAddr& other) const;
+ bool operator==(const AudioDeviceTypeAddr& rhs) const;
+
+ bool operator!=(const AudioDeviceTypeAddr& rhs) const;
+
void reset();
std::string toString(bool includeSensitiveInfo=false) const;
@@ -69,6 +73,14 @@
*/
DeviceTypeSet getAudioDeviceTypes(const AudioDeviceTypeAddrVector& deviceTypeAddrs);
+/**
+ * Return a collection of the AudioDeviceTypeAddrs that are present in `devices` but not
+ * in `devicesToExclude`.
+ */
+AudioDeviceTypeAddrVector excludeDeviceTypeAddrsFrom(
+ const AudioDeviceTypeAddrVector& devices,
+ const AudioDeviceTypeAddrVector& devicesToExclude);
+
std::string dumpAudioDeviceTypeAddrVector(const AudioDeviceTypeAddrVector& deviceTypeAddrs,
bool includeSensitiveInfo=false);
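excludeDeviceTypeAddrsFrom builds a std::set keyed by the existing AudioDeviceTypeAddr::operator< and keeps only the devices not found in the exclusion set, which is O((n+m) log m). The same shape as a standalone, generic sketch (the excludeFrom name is illustrative):

#include <set>
#include <vector>

// Generic version of the exclusion helper: keep the elements of `items` that
// do not appear in `toExclude`. Requires operator< on T, just as the
// AudioDeviceTypeAddr version relies on AudioDeviceTypeAddr::operator<.
template <typename T>
std::vector<T> excludeFrom(const std::vector<T>& items,
                           const std::vector<T>& toExclude) {
    std::set<T> excluded(toExclude.begin(), toExclude.end());
    std::vector<T> remaining;
    for (const T& item : items) {
        if (excluded.count(item) == 0) {
            remaining.push_back(item);
        }
    }
    return remaining;
}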
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 1f2a5e1..ee69cfb 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -30,7 +30,6 @@
"Bundle/src/LVM_Control.cpp",
"SpectrumAnalyzer/src/LVPSA_Control.cpp",
"SpectrumAnalyzer/src/LVPSA_Init.cpp",
- "SpectrumAnalyzer/src/LVPSA_Memory.cpp",
"SpectrumAnalyzer/src/LVPSA_Process.cpp",
"SpectrumAnalyzer/src/LVPSA_QPD_Init.cpp",
"SpectrumAnalyzer/src/LVPSA_QPD_Process.cpp",
@@ -137,7 +136,6 @@
],
cppflags: [
"-fvisibility=hidden",
- "-DSUPPORT_MC",
"-Wall",
"-Werror",
diff --git a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
index 948d79c..cb69c88 100644
--- a/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
+++ b/media/libeffects/lvm/lib/Bass/lib/LVDBE.h
@@ -69,9 +69,6 @@
/* */
/****************************************************************************************/
-/* Memory table*/
-#define LVDBE_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-
/* Bass Enhancement effect level */
#define LVDBE_EFFECT_03DB 3 /* Effect defines for backwards compatibility */
#define LVDBE_EFFECT_06DB 6
@@ -112,25 +109,12 @@
LVDBE_VOLUME_MAX = LVM_MAXINT_32
} LVDBE_Volume_en;
-/* Memory Types */
-typedef enum
-{
- LVDBE_PERSISTENT = 0,
- LVDBE_PERSISTENT_DATA = 1,
- LVDBE_PERSISTENT_COEF = 2,
- LVDBE_SCRATCH = 3,
- LVDBE_MEMORY_MAX = LVM_MAXINT_32
-
-} LVDBE_MemoryTypes_en;
-
/* Function return status */
typedef enum
{
LVDBE_SUCCESS = 0, /* Successful return from a routine */
- LVDBE_ALIGNMENTERROR = 1, /* Memory alignment error */
- LVDBE_NULLADDRESS = 2, /* NULL allocation address */
- LVDBE_TOOMANYSAMPLES = 3, /* Maximum block size exceeded */
- LVDBE_SIZEERROR = 4, /* Incorrect structure size */
+ LVDBE_NULLADDRESS = 1, /* NULL allocation address */
+ LVDBE_TOOMANYSAMPLES = 2, /* Maximum block size exceeded */
LVDBE_STATUS_MAX = LVM_MAXINT_32
} LVDBE_ReturnStatus_en;
@@ -213,21 +197,6 @@
/* */
/****************************************************************************************/
-/* Memory region definition */
-typedef struct
-{
- LVM_UINT32 Size; /* Region size in bytes */
- LVM_UINT16 Alignment; /* Region alignment in bytes */
- LVDBE_MemoryTypes_en Type; /* Region type */
- void *pBaseAddress; /* Pointer to the region base address */
-} LVDBE_MemoryRegion_t;
-
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVDBE_MemoryRegion_t Region[LVDBE_NR_MEMORY_REGIONS]; /* One definition for each region */
-} LVDBE_MemTab_t;
-
/* Parameter structure */
typedef struct
{
@@ -239,9 +208,7 @@
LVDBE_Volume_en VolumeControl;
LVM_INT16 VolumedB;
LVM_INT16 HeadroomdB;
-#ifdef SUPPORT_MC
LVM_INT16 NrChannels;
-#endif
} LVDBE_Params_t;
@@ -261,75 +228,40 @@
/****************************************************************************************/
/* */
-/* FUNCTION: LVDBE_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the default capabilites */
-/* */
-/* RETURNS: */
-/* LVDBE_SUCCESS Succeeded */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVDBE_Process function */
-/* */
-/****************************************************************************************/
-
-LVDBE_ReturnStatus_en LVDBE_Memory(LVDBE_Handle_t hInstance,
- LVDBE_MemTab_t *pMemoryTable,
- LVDBE_Capabilities_t *pCapabilities);
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVDBE_Init */
/* */
/* DESCRIPTION: */
/* Create and initialisation function for the Bass Enhancement module */
/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* LVDBE_Memory before calling this function. */
-/* */
/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
+/* phInstance Pointer to instance handle */
/* pCapabilities Pointer to the initialisation capabilities */
+/* pScratch Pointer to the bundle scratch buffer */
/* */
/* RETURNS: */
-/* LVDBE_SUCCESS Initialisation succeeded */
-/* LVDBE_ALIGNMENTERROR Instance or scratch memory on incorrect alignment */
-/* LVDBE_NULLADDRESS One or more memory has a NULL pointer */
+/* LVDBE_SUCCESS Initialisation succeeded */
+/* LVDBE_NULLADDRESS One or more memory allocations returned a NULL pointer - malloc failure */
/* */
/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVDBE_Process function */
+/* 1. This function must not be interrupted by the LVDBE_Process function */
/* */
/****************************************************************************************/
+LVDBE_ReturnStatus_en LVDBE_Init(LVDBE_Handle_t *phInstance,
+ LVDBE_Capabilities_t *pCapabilities,
+ void *pScratch);
-LVDBE_ReturnStatus_en LVDBE_Init(LVDBE_Handle_t *phInstance,
- LVDBE_MemTab_t *pMemoryTable,
- LVDBE_Capabilities_t *pCapabilities);
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVDBE_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by LVDBE_Init, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/****************************************************************************************/
+void LVDBE_DeInit(LVDBE_Handle_t *phInstance);
/****************************************************************************************/
/* */
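With the memory-table API gone, a client now calls LVDBE_Init with a NULL handle plus the bundle scratch buffer, and LVDBE_DeInit when finished. A usage sketch under the assumption that the capability fields and the scratch size (kScratchBytes below) are configured elsewhere; those values are placeholders, not taken from this patch.

#include <stdlib.h>
#include "LVDBE.h"

const size_t kScratchBytes = 16384;            // placeholder; real size comes from the bundle

LVDBE_Handle_t hInstance = LVM_NULL;
LVDBE_Capabilities_t capabilities = {};        // filled in by the caller
void *pScratch = malloc(kScratchBytes);        // scratch buffer stays owned by the caller

LVDBE_ReturnStatus_en status = LVDBE_Init(&hInstance, &capabilities, pScratch);
if (status != LVDBE_SUCCESS) {
    // Init may have allocated the instance before a later calloc failed,
    // so DeInit is still the safe cleanup path.
    LVDBE_DeInit(&hInstance);
} else {
    // ... LVDBE_Process(hInstance, ...) calls go here ...
    LVDBE_DeInit(&hInstance);                  // frees pData, pCoef and the instance
}
free(pScratch);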
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
index ad77696..bd03dd3 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
@@ -20,176 +20,60 @@
/* Includes */
/* */
/****************************************************************************************/
+#include <stdlib.h>
#include "LVDBE.h"
#include "LVDBE_Private.h"
/****************************************************************************************/
/* */
-/* FUNCTION: LVDBE_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the instance capabilities */
-/* */
-/* RETURNS: */
-/* LVDBE_SUCCESS Succeeded */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVDBE_Process function */
-/* */
-/****************************************************************************************/
-
-LVDBE_ReturnStatus_en LVDBE_Memory(LVDBE_Handle_t hInstance,
- LVDBE_MemTab_t *pMemoryTable,
- LVDBE_Capabilities_t *pCapabilities)
-{
-
- LVM_UINT32 ScratchSize;
- LVDBE_Instance_t *pInstance = (LVDBE_Instance_t *)hInstance;
-
- /*
- * Fill in the memory table
- */
- if (hInstance == LVM_NULL)
- {
- /*
- * Instance memory
- */
- pMemoryTable->Region[LVDBE_MEMREGION_INSTANCE].Size = sizeof(LVDBE_Instance_t);
- pMemoryTable->Region[LVDBE_MEMREGION_INSTANCE].Alignment = LVDBE_INSTANCE_ALIGN;
- pMemoryTable->Region[LVDBE_MEMREGION_INSTANCE].Type = LVDBE_PERSISTENT;
- pMemoryTable->Region[LVDBE_MEMREGION_INSTANCE].pBaseAddress = LVM_NULL;
-
- /*
- * Data memory
- */
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_DATA].Size = sizeof(LVDBE_Data_FLOAT_t);
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_DATA].Alignment = LVDBE_PERSISTENT_DATA_ALIGN;
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_DATA].Type = LVDBE_PERSISTENT_DATA;
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_DATA].pBaseAddress = LVM_NULL;
-
- /*
- * Coef memory
- */
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size = sizeof(LVDBE_Coef_FLOAT_t);
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Alignment = LVDBE_PERSISTENT_COEF_ALIGN;
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].Type = LVDBE_PERSISTENT_COEF;
- pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].pBaseAddress = LVM_NULL;
-
- /*
- * Scratch memory
- */
- ScratchSize = (LVM_UINT32)(LVDBE_SCRATCHBUFFERS_INPLACE*sizeof(LVM_FLOAT) * \
- pCapabilities->MaxBlockSize);
- pMemoryTable->Region[LVDBE_MEMREGION_SCRATCH].Size = ScratchSize;
- pMemoryTable->Region[LVDBE_MEMREGION_SCRATCH].Alignment = LVDBE_SCRATCH_ALIGN;
- pMemoryTable->Region[LVDBE_MEMREGION_SCRATCH].Type = LVDBE_SCRATCH;
- pMemoryTable->Region[LVDBE_MEMREGION_SCRATCH].pBaseAddress = LVM_NULL;
- }
- else
- {
- /* Read back memory allocation table */
- *pMemoryTable = pInstance->MemoryTable;
- }
-
- return(LVDBE_SUCCESS);
-}
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVDBE_Init */
/* */
/* DESCRIPTION: */
-/* Create and initialisation function for the Dynamic Bass Enhancement module */
-/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* DBE_Memory before calling this function. */
+/* Create and initialisation function for the Bass Enhancement module */
/* */
/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
-/* pCapabilities Pointer to the instance capabilities */
+/* phInstance Pointer to instance handle */
+/* pCapabilities Pointer to the initialisation capabilities */
+/* pScratch Pointer to the bundle scratch buffer */
/* */
/* RETURNS: */
/* LVDBE_SUCCESS Initialisation succeeded */
-/* LVDBE_ALIGNMENTERROR Instance or scratch memory on incorrect alignment */
-/* LVDBE_NULLADDRESS Instance or scratch memory has a NULL pointer */
+/* LVDBE_NULLADDRESS One or more memory allocations returned a NULL pointer - malloc failure */
/* */
/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVDBE_Process function */
+/* 1. This function must not be interrupted by the LVDBE_Process function */
/* */
/****************************************************************************************/
-
-LVDBE_ReturnStatus_en LVDBE_Init(LVDBE_Handle_t *phInstance,
- LVDBE_MemTab_t *pMemoryTable,
- LVDBE_Capabilities_t *pCapabilities)
+LVDBE_ReturnStatus_en LVDBE_Init(LVDBE_Handle_t *phInstance,
+ LVDBE_Capabilities_t *pCapabilities,
+ void *pScratch)
{
LVDBE_Instance_t *pInstance;
- LVMixer3_1St_FLOAT_st *pMixer_Instance;
- LVMixer3_2St_FLOAT_st *pBypassMixer_Instance;
+ LVMixer3_1St_FLOAT_st *pMixer_Instance;
+ LVMixer3_2St_FLOAT_st *pBypassMixer_Instance;
LVM_FLOAT MixGain;
- LVM_INT16 i;
/*
- * Set the instance handle if not already initialised
+ * Create the instance handle if not already initialised
*/
if (*phInstance == LVM_NULL)
{
- *phInstance = (LVDBE_Handle_t)pMemoryTable->Region[LVDBE_MEMREGION_INSTANCE].pBaseAddress;
+ *phInstance = calloc(1, sizeof(*pInstance));
+ }
+ if (*phInstance == LVM_NULL)
+ {
+ return LVDBE_NULLADDRESS;
}
pInstance =(LVDBE_Instance_t *)*phInstance;
/*
- * Check the memory table for NULL pointers and incorrectly aligned data
- */
- for (i=0; i<LVDBE_NR_MEMORY_REGIONS; i++)
- {
- if (pMemoryTable->Region[i].Size!=0)
- {
- if (pMemoryTable->Region[i].pBaseAddress==LVM_NULL)
- {
- return(LVDBE_NULLADDRESS);
- }
- if (((uintptr_t)pMemoryTable->Region[i].pBaseAddress % pMemoryTable->Region[i].Alignment)!=0){
- return(LVDBE_ALIGNMENTERROR);
- }
- }
- }
-
- /*
* Save the capabilities and scratch pointer in the instance structure
*/
pInstance->Capabilities = *pCapabilities;
- /*
- * Save the memory table in the instance structure
- */
- pInstance->MemoryTable = *pMemoryTable;
+ pInstance->pScratch = pScratch;
/*
* Set the default instance parameters
@@ -204,12 +88,18 @@
pInstance->Params.VolumedB = 0;
/*
- * Set pointer to data and coef memory
+ * Create pointer to data and coef memory
*/
- pInstance->pData =
- (LVDBE_Data_FLOAT_t *)pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_DATA].pBaseAddress;
- pInstance->pCoef =
- (LVDBE_Coef_FLOAT_t *)pMemoryTable->Region[LVDBE_MEMREGION_PERSISTENT_COEF].pBaseAddress;
+ pInstance->pData = (LVDBE_Data_FLOAT_t *)calloc(1, sizeof(*(pInstance->pData)));
+ if (pInstance->pData == NULL)
+ {
+ return LVDBE_NULLADDRESS;
+ }
+ pInstance->pCoef = (LVDBE_Coef_FLOAT_t *)calloc(1, sizeof(*(pInstance->pCoef)));
+ if (pInstance->pCoef == NULL)
+ {
+ return LVDBE_NULLADDRESS;
+ }
/*
* Initialise the filters
@@ -278,3 +168,32 @@
return(LVDBE_SUCCESS);
}
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVDBE_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by LVDBE_Init, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/****************************************************************************************/
+void LVDBE_DeInit(LVDBE_Handle_t *phInstance)
+{
+ LVDBE_Instance_t *pInstance = (LVDBE_Instance_t *)*phInstance;
+ if (pInstance == LVM_NULL) {
+ return;
+ }
+ if (pInstance->pData != LVM_NULL) {
+ free(pInstance->pData);
+ pInstance->pData = LVM_NULL;
+ }
+ if (pInstance->pCoef != LVM_NULL) {
+ free(pInstance->pCoef);
+ pInstance->pCoef = LVM_NULL;
+ }
+ free(pInstance);
+ *phInstance = LVM_NULL;
+}
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index f3faaed..377d20e 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -47,24 +47,6 @@
/* General */
#define LVDBE_INVALID 0xFFFF /* Invalid init parameter */
-/* Memory */
-#define LVDBE_MEMREGION_INSTANCE 0 /* Offset to the instance memory region */
-#define LVDBE_MEMREGION_PERSISTENT_DATA 1 /* Offset to persistent data memory region */
-#define LVDBE_MEMREGION_PERSISTENT_COEF 2 /* Offset to persistent coefficient region */
-#define LVDBE_MEMREGION_SCRATCH 3 /* Offset to data scratch memory region */
-
-#define LVDBE_INSTANCE_ALIGN 4 /* 32-bit alignment for structures */
-#define LVDBE_PERSISTENT_DATA_ALIGN 4 /* 32-bit alignment for data */
-#define LVDBE_PERSISTENT_COEF_ALIGN 4 /* 32-bit alignment for coef */
-#define LVDBE_SCRATCH_ALIGN 4 /* 32-bit alignment for long data */
-
-#ifdef SUPPORT_MC
-/* Number of buffers required for inplace processing */
-#define LVDBE_SCRATCHBUFFERS_INPLACE (LVM_MAX_CHANNELS * 3)
-#else
-#define LVDBE_SCRATCHBUFFERS_INPLACE 6 /* Number of buffers required for inplace processing */
-#endif
-
#define LVDBE_MIXER_TC 5 /* Mixer time */
#define LVDBE_BYPASS_MIXER_TC 100 /* Bypass mixer time */
@@ -100,13 +82,13 @@
typedef struct
{
/* Public parameters */
- LVDBE_MemTab_t MemoryTable; /* Instance memory allocation table */
LVDBE_Params_t Params; /* Instance parameters */
LVDBE_Capabilities_t Capabilities; /* Instance capabilities */
/* Data and coefficient pointers */
LVDBE_Data_FLOAT_t *pData; /* Instance data */
LVDBE_Coef_FLOAT_t *pCoef; /* Instance coefficients */
+ void *pScratch; /* scratch pointer */
} LVDBE_Instance_t;
/****************************************************************************************/
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
index b4a71c7..088de9f 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
@@ -81,18 +81,13 @@
LVDBE_Instance_t *pInstance =(LVDBE_Instance_t *)hInstance;
/*Extract number of Channels info*/
-#ifdef SUPPORT_MC
// Mono passed in as stereo
const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
? 2 : pInstance->Params.NrChannels;
-#else
- const LVM_INT32 NrChannels = 2; // FCC_2
-#endif
const LVM_INT32 NrSamples = NrChannels * NrFrames;
/* Space to store DBE path computation */
- LVM_FLOAT * const pScratch =
- (LVM_FLOAT *)pInstance->MemoryTable.Region[LVDBE_MEMREGION_SCRATCH].pBaseAddress;
+ LVM_FLOAT * const pScratch = (LVM_FLOAT *)pInstance->pScratch;
/*
* Scratch for Mono path starts at offset of
@@ -136,33 +131,20 @@
*/
if (pInstance->Params.HPFSelect == LVDBE_HPF_ON)
{
-#ifdef SUPPORT_MC
BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance */
pScratch, /* Source */
pScratch, /* Destination */
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- BQ_2I_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance,/* Filter instance */
- pScratch, /* Source */
- pScratch, /* Destination */
- (LVM_INT16)NrFrames);
-#endif
}
/*
* Create the mono stream
*/
-#ifdef SUPPORT_MC
FromMcToMono_Float(pScratch, /* Source */
pMono, /* Mono destination */
(LVM_INT16)NrFrames, /* Number of frames */
(LVM_INT16)NrChannels);
-#else
- From2iToMono_Float(pScratch, /* Stereo source */
- pMono, /* Mono destination */
- (LVM_INT16)NrFrames);
-#endif
/*
* Apply the band pass filter
@@ -175,20 +157,12 @@
/*
* Apply the AGC and mix
*/
-#ifdef SUPPORT_MC
AGC_MIX_VOL_Mc1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer */
pScratch, /* Source */
pMono, /* Mono band pass source */
pScratch, /* Destination */
NrFrames, /* Number of frames */
NrChannels); /* Number of channels */
-#else
- AGC_MIX_VOL_2St1Mon_D32_WRA(&pInstance->pData->AGCInstance, /* Instance pointer */
- pScratch, /* Stereo source */
- pMono, /* Mono band pass source */
- pScratch, /* Stereo destination */
- NrFrames);
-#endif
for (LVM_INT32 ii = 0; ii < NrSamples; ++ii) {
//TODO: replace with existing clamping function
@@ -213,18 +187,11 @@
* The algorithm is disabled but volume management is required to compensate for
* headroom and volume (if enabled)
*/
-#ifdef SUPPORT_MC
LVC_MixSoft_Mc_D16C31_SAT(&pInstance->pData->BypassVolume,
pInData,
pScratchVol,
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- LVC_MixSoft_1St_D16C31_SAT(&pInstance->pData->BypassVolume,
- pInData,
- pScratchVol,
- (LVM_INT16)NrSamples); /* Left and right, really # samples */
-#endif
} else {
// clear bypass volume path
memset(pScratchVol, 0, sizeof(*pScratchVol) * NrSamples);
@@ -233,19 +200,11 @@
/*
* Mix DBE processed path and bypass volume path
*/
-#ifdef SUPPORT_MC
LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->pData->BypassMixer,
pScratch,
pScratchVol,
pOutData,
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- LVC_MixSoft_2St_D16C31_SAT(&pInstance->pData->BypassMixer,
- pScratch,
- pScratchVol,
- pOutData,
- (LVM_INT16)NrSamples);
-#endif
return LVDBE_SUCCESS;
}
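With SUPPORT_MC now unconditional, the channel count used by the process call comes from the control parameters, and mono input is treated as two channels because it arrives as mono-in-stereo ("Mono passed in as stereo" above). In isolation the bookkeeping is just the following sketch:

// Channel/sample bookkeeping used by the DBE process call above: a mono
// stream is carried as two identical channels, so it is counted as stereo.
int32_t effectiveChannels(int32_t paramsNrChannels) {
    return paramsNrChannels == 1 ? 2 : paramsNrChannels;
}

// Total interleaved samples in a block of nrFrames frames.
int32_t totalSamples(int32_t nrFrames, int32_t paramsNrChannels) {
    return effectiveChannels(paramsNrChannels) * nrFrames;
}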
diff --git a/media/libeffects/lvm/lib/Bundle/lib/LVM.h b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
index e4e8450..783c3a0 100644
--- a/media/libeffects/lvm/lib/Bundle/lib/LVM.h
+++ b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
@@ -67,9 +67,6 @@
/* */
/****************************************************************************************/
-/* Memory table*/
-#define LVM_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-
/* Concert Sound effect level presets */
#define LVM_CS_EFFECT_NONE 0 /* 0% effect, minimum value */
#define LVM_CS_EFFECT_LOW 16384 /* 50% effect */
@@ -225,12 +222,6 @@
/* */
/****************************************************************************************/
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVM_MemoryRegion_st Region[LVM_NR_MEMORY_REGIONS]; /* One definition for each region */
-} LVM_MemTab_t;
-
/* N-Band equaliser band definition */
typedef struct
{
@@ -285,10 +276,8 @@
/* Spectrum Analyzer parameters Control */
LVM_PSA_Mode_en PSA_Enable;
LVM_PSA_DecaySpeed_en PSA_PeakDecayRate; /* Peak value decay rate*/
-#ifdef SUPPORT_MC
LVM_INT32 NrChannels;
LVM_INT32 ChMask;
-#endif
} LVM_ControlParams_t;
@@ -343,51 +332,14 @@
/****************************************************************************************/
/* */
-/* FUNCTION: LVM_GetMemoryTable */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pInstParams Pointer to the instance parameters */
-/* */
-/* RETURNS: */
-/* LVM_SUCCESS Succeeded */
-/* LVM_NULLADDRESS When one of pMemoryTable or pInstParams is NULL */
-/* LVM_OUTOFRANGE When any of the Instance parameters are out of range */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVM_Process function */
-/* */
-/****************************************************************************************/
-LVM_ReturnStatus_en LVM_GetMemoryTable(LVM_Handle_t hInstance,
- LVM_MemTab_t *pMemoryTable,
- LVM_InstParams_t *pInstParams);
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVM_GetInstanceHandle */
/* */
/* DESCRIPTION: */
-/* This function is used to create a bundle instance. It returns the created instance */
-/* handle through phInstance. All parameters are set to their default, inactive state. */
+/* This function is used to create a bundle instance. */
+/* All parameters are set to their default, inactive state. */
/* */
/* PARAMETERS: */
-/* phInstance pointer to the instance handle */
-/* pMemoryTable Pointer to the memory definition table */
+/* phInstance Pointer to the instance handle */
/* pInstParams Pointer to the instance parameters */
/* */
/* RETURNS: */
@@ -400,11 +352,27 @@
/* */
/****************************************************************************************/
LVM_ReturnStatus_en LVM_GetInstanceHandle(LVM_Handle_t *phInstance,
- LVM_MemTab_t *pMemoryTable,
LVM_InstParams_t *pInstParams);
/****************************************************************************************/
/* */
+/* FUNCTION: LVM_DelInstanceHandle */
+/* */
+/* DESCRIPTION: */
+/* This function is used to free the bundle instance and all memory allocated by */
+/* LVM_GetInstanceHandle. */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to the instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVM_Process function */
+/* */
+/****************************************************************************************/
+void LVM_DelInstanceHandle(LVM_Handle_t *phInstance);
+
+/****************************************************************************************/
+/* */
/* FUNCTION: LVM_ClearAudioBuffers */
/* */
/* DESCRIPTION: */
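The bundle-level API now follows the same shape as LVDBE above: LVM_GetInstanceHandle allocates and defaults the instance, and the new LVM_DelInstanceHandle frees it. A hedged usage sketch; the instance-parameter contents are left to the caller and are not shown in this patch.

#include "LVM.h"

LVM_Handle_t hInstance = LVM_NULL;
LVM_InstParams_t instParams = {};              // instance parameters configured by the caller

LVM_ReturnStatus_en status = LVM_GetInstanceHandle(&hInstance, &instParams);
if (status == LVM_SUCCESS) {
    // ... LVM_SetControlParameters / LVM_Process calls go here ...
    LVM_DelInstanceHandle(&hInstance);         // frees the bundle instance allocated above
}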
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
index 3aeddbb..4c25ce0 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
@@ -62,11 +62,7 @@
LVM_Instance_t *pInstance = (LVM_Instance_t *)hInstance;
LVM_Buffer_t *pBuffer;
LVM_FLOAT *pDest;
-#ifdef SUPPORT_MC
LVM_INT16 NumChannels = pInstance->NrChannels;
-#else
- LVM_INT16 NumChannels = 2;
-#endif
/*
* Set the processing address pointers
@@ -388,11 +384,9 @@
LVM_INT16 NumSamples;
LVM_FLOAT *pStart;
LVM_FLOAT *pDest;
-#ifdef SUPPORT_MC
LVM_INT32 NrChannels = pInstance->NrChannels;
#define NrFrames NumSamples // alias for clarity
#define FrameCount SampleCount
-#endif
/*
* Set the pointers
@@ -426,25 +420,15 @@
/*
* Copy all output delay samples to the output
*/
-#ifdef SUPPORT_MC
Copy_Float(&pBuffer->OutDelayBuffer[0], /* Source */
pDest, /* Destination */
/* Number of delay samples */
(LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
-#else
- Copy_Float(&pBuffer->OutDelayBuffer[0], /* Source */
- pDest, /* Destination */
- (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of delay samples */
-#endif
/*
* Update the pointer and sample counts
*/
-#ifdef SUPPORT_MC
pDest += NrChannels * pBuffer->OutDelaySamples; /* Output sample pointer */
-#else
- pDest += 2 * pBuffer->OutDelaySamples; /* Output sample pointer */
-#endif
NumSamples = (LVM_INT16)(NumSamples - pBuffer->OutDelaySamples); /* Samples left \
to send */
pBuffer->OutDelaySamples = 0; /* No samples left in the buffer */
@@ -454,40 +438,24 @@
/*
* Copy only some of the output delay samples to the output
*/
-#ifdef SUPPORT_MC
Copy_Float(&pBuffer->OutDelayBuffer[0], /* Source */
pDest, /* Destination */
(LVM_INT16)(NrChannels * NrFrames)); /* Number of delay samples */
-#else
- Copy_Float(&pBuffer->OutDelayBuffer[0], /* Source */
- pDest, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Number of delay samples */
-#endif
/*
* Update the pointer and sample counts
*/
-#ifdef SUPPORT_MC
pDest += NrChannels * NrFrames; /* Output sample pointer */
-#else
- pDest += 2 * NumSamples; /* Output sample pointer */
-#endif
/* No samples left in the buffer */
pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples - NumSamples);
/*
* Realign the delay buffer data to avoid using circular buffer management
*/
-#ifdef SUPPORT_MC
Copy_Float(&pBuffer->OutDelayBuffer[NrChannels * NrFrames], /* Source */
&pBuffer->OutDelayBuffer[0], /* Destination */
/* Number of samples to move */
(LVM_INT16)(NrChannels * pBuffer->OutDelaySamples));
-#else
- Copy_Float(&pBuffer->OutDelayBuffer[2 * NumSamples], /* Source */
- &pBuffer->OutDelayBuffer[0], /* Destination */
- (LVM_INT16)(2 * pBuffer->OutDelaySamples)); /* Number of samples to move */
-#endif
NumSamples = 0; /* Samples left to send */
}
}
@@ -503,23 +471,13 @@
/*
* Copy all processed samples to the output
*/
-#ifdef SUPPORT_MC
Copy_Float(pStart, /* Source */
pDest, /* Destination */
(LVM_INT16)(NrChannels * FrameCount)); /* Number of processed samples */
-#else
- Copy_Float(pStart, /* Source */
- pDest, /* Destination */
- (LVM_INT16)(2 * SampleCount)); /* Number of processed samples */
-#endif
/*
* Update the pointer and sample counts
*/
-#ifdef SUPPORT_MC
pDest += NrChannels * FrameCount; /* Output sample pointer */
-#else
- pDest += 2 * SampleCount; /* Output sample pointer */
-#endif
NumSamples = (LVM_INT16)(NumSamples - SampleCount); /* Samples left to send */
SampleCount = 0; /* No samples left in the buffer */
}
@@ -528,25 +486,14 @@
/*
* Copy only some processed samples to the output
*/
-#ifdef SUPPORT_MC
Copy_Float(pStart, /* Source */
pDest, /* Destination */
(LVM_INT16)(NrChannels * NrFrames)); /* Number of processed samples */
-#else
- Copy_Float(pStart, /* Source */
- pDest, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Number of processed samples */
-#endif
/*
* Update the pointers and sample counts
*/
-#ifdef SUPPORT_MC
pStart += NrChannels * NrFrames; /* Processed sample pointer */
pDest += NrChannels * NrFrames; /* Output sample pointer */
-#else
- pStart += 2 * NumSamples; /* Processed sample pointer */
- pDest += 2 * NumSamples; /* Output sample pointer */
-#endif
SampleCount = (LVM_INT16)(SampleCount - NumSamples); /* Processed samples left */
NumSamples = 0; /* Clear the sample count */
}
@@ -557,16 +504,10 @@
*/
if (SampleCount != 0)
{
-#ifdef SUPPORT_MC
Copy_Float(pStart, /* Source */
/* Destination */
&pBuffer->OutDelayBuffer[NrChannels * pBuffer->OutDelaySamples],
(LVM_INT16)(NrChannels * FrameCount)); /* Number of processed samples */
-#else
- Copy_Float(pStart, /* Source */
- &pBuffer->OutDelayBuffer[2 * pBuffer->OutDelaySamples], /* Destination */
- (LVM_INT16)(2 * SampleCount)); /* Number of processed samples */
-#endif
/* Update the buffer count */
pBuffer->OutDelaySamples = (LVM_INT16)(pBuffer->OutDelaySamples + SampleCount);
}
@@ -606,7 +547,6 @@
{
LVM_Instance_t *pInstance = (LVM_Instance_t *)hInstance;
-#ifdef SUPPORT_MC
LVM_INT16 NumChannels = pInstance->NrChannels;
if (NumChannels == 1)
{
@@ -615,19 +555,12 @@
}
#undef NrFrames
#define NrFrames (*pNumSamples) // alias for clarity
-#else
- LVM_INT16 NumChannels = 2;
-#endif
/*
* Update sample counts
*/
pInstance->pInputSamples += (LVM_INT16)(*pNumSamples * NumChannels); /* Update the I/O pointers */
-#ifdef SUPPORT_MC
pInstance->pOutputSamples += (LVM_INT16)(NrFrames * NumChannels);
-#else
- pInstance->pOutputSamples += (LVM_INT16)(*pNumSamples * 2);
-#endif
pInstance->SamplesToProcess = (LVM_INT16)(pInstance->SamplesToProcess - *pNumSamples); /* Update the sample count */
/*
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.cpp
index ff2c90a..bb3652e 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.cpp
@@ -70,23 +70,17 @@
(pParams->SampleRate != LVM_FS_32000) && (pParams->SampleRate != LVM_FS_44100) && (pParams->SampleRate != LVM_FS_48000) &&
(pParams->SampleRate != LVM_FS_88200) && (pParams->SampleRate != LVM_FS_96000) &&
(pParams->SampleRate != LVM_FS_176400) && (pParams->SampleRate != LVM_FS_192000)) ||
-#ifdef SUPPORT_MC
((pParams->SourceFormat != LVM_STEREO) &&
(pParams->SourceFormat != LVM_MONOINSTEREO) &&
(pParams->SourceFormat != LVM_MONO) &&
(pParams->SourceFormat != LVM_MULTICHANNEL)) ||
-#else
- ((pParams->SourceFormat != LVM_STEREO) && (pParams->SourceFormat != LVM_MONOINSTEREO) && (pParams->SourceFormat != LVM_MONO)) ||
-#endif
(pParams->SpeakerType > LVM_EX_HEADPHONES))
{
return (LVM_OUTOFRANGE);
}
-#ifdef SUPPORT_MC
pInstance->Params.NrChannels = pParams->NrChannels;
pInstance->Params.ChMask = pParams->ChMask;
-#endif
/*
* Cinema Sound parameters
*/
@@ -528,10 +522,8 @@
} while ((pInstance->ControlPending != LVM_FALSE) &&
(Count > 0));
-#ifdef SUPPORT_MC
pInstance->NrChannels = LocalParams.NrChannels;
pInstance->ChMask = LocalParams.ChMask;
-#endif
/* Clear all internal data if format change*/
if(LocalParams.SourceFormat != pInstance->Params.SourceFormat)
@@ -638,9 +630,7 @@
DBE_Params.HeadroomdB = 0;
DBE_Params.VolumeControl = LVDBE_VOLUME_OFF;
DBE_Params.VolumedB = 0;
-#ifdef SUPPORT_MC
DBE_Params.NrChannels = LocalParams.NrChannels;
-#endif
/*
* Make the changes
@@ -690,7 +680,6 @@
{
EQNB_Params.SourceFormat = LVEQNB_STEREO;
}
-#ifdef SUPPORT_MC
/* Note: Currently SourceFormat field of EQNB is not been
* used by the module.
*/
@@ -698,14 +687,11 @@
{
EQNB_Params.SourceFormat = LVEQNB_MULTICHANNEL;
}
-#endif
else
{
EQNB_Params.SourceFormat = LVEQNB_MONOINSTEREO; /* Force to Mono-in-Stereo mode */
}
-#ifdef SUPPORT_MC
EQNB_Params.NrChannels = LocalParams.NrChannels;
-#endif
/*
* Set the control flag
@@ -766,16 +752,12 @@
CS_Params.SpeakerType = LVCS_HEADPHONES;
}
-#ifdef SUPPORT_MC
/* Concert sound module processes only the left and right channels
* data. So the Source Format is set to LVCS_STEREO for multichannel
* input also.
*/
if (LocalParams.SourceFormat == LVM_STEREO ||
LocalParams.SourceFormat == LVM_MULTICHANNEL)
-#else
- if (LocalParams.SourceFormat == LVM_STEREO) /* Mono format not supported */
-#endif
{
CS_Params.SourceFormat = LVCS_STEREO;
}
@@ -786,9 +768,7 @@
CS_Params.SampleRate = LocalParams.SampleRate;
CS_Params.ReverbLevel = LocalParams.VirtualizerReverbLevel;
CS_Params.EffectLevel = LocalParams.CS_EffectLevel;
-#ifdef SUPPORT_MC
CS_Params.NrChannels = LocalParams.NrChannels;
-#endif
/*
* Set the control flag
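With the multichannel path always compiled, LVM_SetControlParameters now accepts LVM_MULTICHANNEL and copies NrChannels and ChMask into the instance, as the LVM_Control.cpp hunks above show. A hedged caller-side sketch (LVM_GetControlParameters/LVM_SetControlParameters are the existing bundle entry points; the channel mask constant and values are illustrative):

    #include "LVM.h"              // bundle public header (assumed include path)
    #include <system/audio.h>     // for AUDIO_CHANNEL_OUT_5POINT1 (assumed)

    // Sketch: switch an existing bundle instance to 5.1 multichannel input.
    LVM_ReturnStatus_en set_multichannel(LVM_Handle_t hInstance) {
        LVM_ControlParams_t Params;
        LVM_ReturnStatus_en Status = LVM_GetControlParameters(hInstance, &Params);
        if (Status != LVM_SUCCESS) return Status;
        Params.SourceFormat = LVM_MULTICHANNEL;          // accepted by the range check above
        Params.NrChannels   = 6;                         // 5.1 content
        Params.ChMask       = AUDIO_CHANNEL_OUT_5POINT1; // copied into the instance
        return LVM_SetControlParameters(hInstance, &Params);
    }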
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
index 5620529..58c18dd 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
@@ -20,6 +20,7 @@
/* Includes */
/* */
/************************************************************************************/
+#include <stdlib.h>
#include "LVM_Private.h"
#include "LVM_Tables.h"
@@ -28,479 +29,31 @@
/****************************************************************************************/
/* */
-/* FUNCTION: LVM_GetMemoryTable */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the default capabilities */
-/* */
-/* RETURNS: */
-/* LVM_SUCCESS Succeeded */
-/* LVM_NULLADDRESS When one of pMemoryTable or pInstParams is NULL */
-/* LVM_OUTOFRANGE When any of the Instance parameters are out of range */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVM_Process function */
-/* 2. The scratch memory is the largest required by any of the sub-modules plus any */
-/* additional scratch requirements of the bundle */
-/* */
-/****************************************************************************************/
-
-/*
- * 4 Types of Memory Regions of LVM
- * TODO: Allocate on the fly.
- * i) LVM_MEMREGION_PERSISTENT_SLOW_DATA - For Instance Handles
- * ii) LVM_MEMREGION_PERSISTENT_FAST_DATA - Persistent Buffers
- * iii) LVM_MEMREGION_PERSISTENT_FAST_COEF - For Holding Structure values
- * iv) LVM_MEMREGION_TEMPORARY_FAST - For Holding Structure values
- *
- * LVM_MEMREGION_PERSISTENT_SLOW_DATA:
- * Total Memory size:
- * sizeof(LVM_Instance_t) + \
- * sizeof(LVM_Buffer_t) + \
- * sizeof(LVPSA_InstancePr_t) + \
- * sizeof(LVM_Buffer_t) - needed if buffer mode is LVM_MANAGED_BUFFER
- *
- * LVM_MEMREGION_PERSISTENT_FAST_DATA:
- * Total Memory size:
- * sizeof(LVM_TE_Data_t) + \
- * 2 * pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t) + \
- * sizeof(LVCS_Data_t) + \
- * sizeof(LVDBE_Data_FLOAT_t) + \
- * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
- * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
- * pInstParams->EQNB_NumBands * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
- * pInstParams->EQNB_NumBands * sizeof(LVEQNB_BandDef_t) + \
- * pInstParams->EQNB_NumBands * sizeof(LVEQNB_BiquadType_en) + \
- * 2 * LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t) + \
- * PSA_InitParams.nBands * sizeof(Biquad_1I_Order2_Taps_t) + \
- * PSA_InitParams.nBands * sizeof(QPD_Taps_t)
- *
- * LVM_MEMREGION_PERSISTENT_FAST_COEF:
- * Total Memory size:
- * sizeof(LVM_TE_Coefs_t) + \
- * sizeof(LVCS_Coefficient_t) + \
- * sizeof(LVDBE_Coef_FLOAT_t) + \
- * sizeof(Biquad_FLOAT_Instance_t) + \
- * sizeof(Biquad_FLOAT_Instance_t) + \
- * pInstParams->EQNB_NumBands * sizeof(Biquad_FLOAT_Instance_t) + \
- * PSA_InitParams.nBands * sizeof(Biquad_Instance_t) + \
- * PSA_InitParams.nBands * sizeof(QPD_State_t)
- *
- * LVM_MEMREGION_TEMPORARY_FAST (Scratch):
- * Total Memory Size:
- * BundleScratchSize + \
- * MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT) + \
- * MaxScratchOf (CS, EQNB, DBE, PSA)
- *
- * a)BundleScratchSize:
- * 3 * LVM_MAX_CHANNELS \
- * * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_FLOAT)
- * This Memory is allocated only when Buffer mode is LVM_MANAGED_BUFFER.
- * b)MaxScratchOf (CS, EQNB, DBE, PSA)
- * This Memory is needed for scratch usage for CS, EQNB, DBE, PSA.
- * CS = (LVCS_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
- * * pCapabilities->MaxBlockSize)
- * EQNB = (LVEQNB_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
- * * pCapabilities->MaxBlockSize)
- * DBE = (LVDBE_SCRATCHBUFFERS_INPLACE*sizeof(LVM_FLOAT)
- * * pCapabilities->MaxBlockSize)
- * PSA = (2 * pInitParams->MaxInputBlockSize * sizeof(LVM_FLOAT))
- * one MaxInputBlockSize for input and another for filter output
- * c)MAX_INTERNAL_BLOCKSIZE
- * This Memory is needed for PSAInput - Temp memory to store output
- * from McToMono block and given as input to PSA block
- */
-
-LVM_ReturnStatus_en LVM_GetMemoryTable(LVM_Handle_t hInstance,
- LVM_MemTab_t *pMemoryTable,
- LVM_InstParams_t *pInstParams)
-{
-
- LVM_Instance_t *pInstance = (LVM_Instance_t *)hInstance;
- LVM_UINT32 AlgScratchSize;
- LVM_UINT32 BundleScratchSize;
- LVM_UINT16 InternalBlockSize;
- INST_ALLOC AllocMem[LVM_NR_MEMORY_REGIONS];
- LVM_INT16 i;
-
- /*
- * Check parameters
- */
- if(pMemoryTable == LVM_NULL)
- {
- return LVM_NULLADDRESS;
- }
-
- /*
- * Return memory table if the instance has already been created
- */
- if (hInstance != LVM_NULL)
- {
- /* Read back memory allocation table */
- *pMemoryTable = pInstance->MemoryTable;
- return(LVM_SUCCESS);
- }
-
- if(pInstParams == LVM_NULL)
- {
- return LVM_NULLADDRESS;
- }
-
- /*
- * Power Spectrum Analyser
- */
- if(pInstParams->PSA_Included > LVM_PSA_ON)
- {
- return (LVM_OUTOFRANGE);
- }
-
- /*
- * Check the instance parameters
- */
- if( (pInstParams->BufferMode != LVM_MANAGED_BUFFERS) && (pInstParams->BufferMode != LVM_UNMANAGED_BUFFERS) )
- {
- return (LVM_OUTOFRANGE);
- }
-
- /* N-Band Equalizer */
- if( pInstParams->EQNB_NumBands > 32 )
- {
- return (LVM_OUTOFRANGE);
- }
-
- if(pInstParams->BufferMode == LVM_MANAGED_BUFFERS)
- {
- if( (pInstParams->MaxBlockSize < LVM_MIN_MAXBLOCKSIZE ) || (pInstParams->MaxBlockSize > LVM_MANAGED_MAX_MAXBLOCKSIZE ) )
- {
- return (LVM_OUTOFRANGE);
- }
- }
- else
- {
- if( (pInstParams->MaxBlockSize < LVM_MIN_MAXBLOCKSIZE ) || (pInstParams->MaxBlockSize > LVM_UNMANAGED_MAX_MAXBLOCKSIZE) )
- {
- return (LVM_OUTOFRANGE);
- }
- }
-
- /*
- * Initialise the AllocMem structures
- */
- for (i=0; i<LVM_NR_MEMORY_REGIONS; i++)
- {
- InstAlloc_Init(&AllocMem[i], LVM_NULL);
- }
- InternalBlockSize = (LVM_UINT16)((pInstParams->MaxBlockSize) & MIN_INTERNAL_BLOCKMASK); /* Force to a multiple of MIN_INTERNAL_BLOCKSIZE */
-
- if (InternalBlockSize < MIN_INTERNAL_BLOCKSIZE)
- {
- InternalBlockSize = MIN_INTERNAL_BLOCKSIZE;
- }
-
- /* Maximum Internal Black Size should not be more than MAX_INTERNAL_BLOCKSIZE*/
- if(InternalBlockSize > MAX_INTERNAL_BLOCKSIZE)
- {
- InternalBlockSize = MAX_INTERNAL_BLOCKSIZE;
- }
-
- /*
- * Bundle requirements
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- sizeof(LVM_Instance_t));
-
- /*
- * Set the algorithm and bundle scratch requirements
- */
- AlgScratchSize = 0;
- if (pInstParams->BufferMode == LVM_MANAGED_BUFFERS)
- {
- BundleScratchSize = 3 * LVM_MAX_CHANNELS \
- * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
- * sizeof(LVM_FLOAT);
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST], /* Scratch buffer */
- BundleScratchSize);
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- sizeof(LVM_Buffer_t));
- }
-
- /*
- * Treble Enhancement requirements
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- sizeof(LVM_TE_Data_t));
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- sizeof(LVM_TE_Coefs_t));
-
- /*
- * N-Band Equalizer requirements
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA], /* Local storage */
- (pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t)));
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA], /* User storage */
- (pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t)));
-
- /*
- * Concert Sound requirements
- */
- {
- LVCS_MemTab_t CS_MemTab;
- LVCS_Capabilities_t CS_Capabilities;
-
- /*
- * Set the capabilities
- */
- CS_Capabilities.MaxBlockSize = InternalBlockSize;
-
- /*
- * Get the memory requirements
- */
- LVCS_Memory(LVM_NULL,
- &CS_MemTab,
- &CS_Capabilities);
-
- /*
- * Update the memory allocation structures
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- CS_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size);
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- CS_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size);
- if (CS_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size > AlgScratchSize) AlgScratchSize = CS_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size;
-
- }
-
- /*
- * Dynamic Bass Enhancement requirements
- */
- {
- LVDBE_MemTab_t DBE_MemTab;
- LVDBE_Capabilities_t DBE_Capabilities;
-
- /*
- * Set the capabilities
- */
- DBE_Capabilities.SampleRate = LVDBE_CAP_FS_8000 | LVDBE_CAP_FS_11025 |
- LVDBE_CAP_FS_12000 | LVDBE_CAP_FS_16000 |
- LVDBE_CAP_FS_22050 | LVDBE_CAP_FS_24000 |
- LVDBE_CAP_FS_32000 | LVDBE_CAP_FS_44100 |
- LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_88200 |
- LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_176400 |
- LVDBE_CAP_FS_192000;
- DBE_Capabilities.CentreFrequency = LVDBE_CAP_CENTRE_55Hz | LVDBE_CAP_CENTRE_55Hz | LVDBE_CAP_CENTRE_66Hz | LVDBE_CAP_CENTRE_78Hz | LVDBE_CAP_CENTRE_90Hz;
- DBE_Capabilities.MaxBlockSize = InternalBlockSize;
-
- /*
- * Get the memory requirements
- */
- LVDBE_Memory(LVM_NULL,
- &DBE_MemTab,
-
- &DBE_Capabilities);
- /*
- * Update the bundle table
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- DBE_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size);
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- DBE_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size);
- if (DBE_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size > AlgScratchSize) AlgScratchSize = DBE_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size;
-
- }
-
- /*
- * N-Band equaliser requirements
- */
- {
- LVEQNB_MemTab_t EQNB_MemTab; /* For N-Band Equaliser */
- LVEQNB_Capabilities_t EQNB_Capabilities;
-
- /*
- * Set the capabilities
- */
- EQNB_Capabilities.SampleRate = LVEQNB_CAP_FS_8000 | LVEQNB_CAP_FS_11025 |
- LVEQNB_CAP_FS_12000 | LVEQNB_CAP_FS_16000 |
- LVEQNB_CAP_FS_22050 | LVEQNB_CAP_FS_24000 |
- LVEQNB_CAP_FS_32000 | LVEQNB_CAP_FS_44100 |
- LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_88200 |
- LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_176400 |
- LVEQNB_CAP_FS_192000;
- EQNB_Capabilities.SourceFormat = LVEQNB_CAP_STEREO | LVEQNB_CAP_MONOINSTEREO;
- EQNB_Capabilities.MaxBlockSize = InternalBlockSize;
- EQNB_Capabilities.MaxBands = pInstParams->EQNB_NumBands;
-
- /*
- * Get the memory requirements
- */
- LVEQNB_Memory(LVM_NULL,
- &EQNB_MemTab,
- &EQNB_Capabilities);
-
- /*
- * Update the bundle table
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- EQNB_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size);
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- EQNB_MemTab.Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size);
- if (EQNB_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size > AlgScratchSize) AlgScratchSize = EQNB_MemTab.Region[LVM_MEMREGION_TEMPORARY_FAST].Size;
-
- }
-
- /*
- * Headroom management memory allocation
- */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t)));
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t)));
-
- /*
- * Spectrum Analyzer memory requirements
- */
- {
- pLVPSA_Handle_t hPSAInst = LVM_NULL;
- LVPSA_MemTab_t PSA_MemTab;
- LVPSA_InitParams_t PSA_InitParams;
- LVPSA_FilterParam_t FiltersParams[9];
- LVPSA_RETURN PSA_Status;
-
- if(pInstParams->PSA_Included == LVM_PSA_ON)
- {
- PSA_InitParams.SpectralDataBufferDuration = (LVM_UINT16) 500;
- PSA_InitParams.MaxInputBlockSize = (LVM_UINT16) 1000;
- PSA_InitParams.nBands = (LVM_UINT16) 9;
-
- PSA_InitParams.pFiltersParams = &FiltersParams[0];
- for(i = 0; i < PSA_InitParams.nBands; i++)
- {
- FiltersParams[i].CenterFrequency = (LVM_UINT16) 1000;
- FiltersParams[i].QFactor = (LVM_UINT16) 25;
- FiltersParams[i].PostGain = (LVM_INT16) 0;
- }
-
- /*
- * Get the memory requirements
- */
- PSA_Status = LVPSA_Memory (hPSAInst,
- &PSA_MemTab,
- &PSA_InitParams);
-
- if (PSA_Status != LVPSA_OK)
- {
- return((LVM_ReturnStatus_en) LVM_ALGORITHMPSA);
- }
-
- /*
- * Update the bundle table
- */
- /* Slow Data */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- PSA_MemTab.Region[LVM_PERSISTENT_SLOW_DATA].Size);
-
- /* Fast Data */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_DATA].Size);
-
- /* Fast Coef */
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_COEF].Size);
-
- /* Fast Temporary */
- InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
- MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT));
-
- if (PSA_MemTab.Region[LVM_TEMPORARY_FAST].Size > AlgScratchSize)
- {
- AlgScratchSize = PSA_MemTab.Region[LVM_TEMPORARY_FAST].Size;
- }
- }
- }
-
- /*
- * Return the memory table
- */
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_SLOW_DATA].Size = InstAlloc_GetTotal(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA]);
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_SLOW_DATA].Type = LVM_PERSISTENT_SLOW_DATA;
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_SLOW_DATA].pBaseAddress = LVM_NULL;
-
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size = InstAlloc_GetTotal(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA]);
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Type = LVM_PERSISTENT_FAST_DATA;
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress = LVM_NULL;
- if (pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size < 4)
- {
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_DATA].Size = 0;
- }
-
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size = InstAlloc_GetTotal(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF]);
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Type = LVM_PERSISTENT_FAST_COEF;
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress = LVM_NULL;
- if (pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size < 4)
- {
- pMemoryTable->Region[LVM_MEMREGION_PERSISTENT_FAST_COEF].Size = 0;
- }
-
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],
- AlgScratchSize);
- pMemoryTable->Region[LVM_MEMREGION_TEMPORARY_FAST].Size = InstAlloc_GetTotal(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST]);
- pMemoryTable->Region[LVM_MEMREGION_TEMPORARY_FAST].Type = LVM_TEMPORARY_FAST;
- pMemoryTable->Region[LVM_MEMREGION_TEMPORARY_FAST].pBaseAddress = LVM_NULL;
- if (pMemoryTable->Region[LVM_MEMREGION_TEMPORARY_FAST].Size < 4)
- {
- pMemoryTable->Region[LVM_MEMREGION_TEMPORARY_FAST].Size = 0;
- }
-
- return(LVM_SUCCESS);
-
-}
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVM_GetInstanceHandle */
/* */
/* DESCRIPTION: */
-/* This function is used to create a bundle instance. It returns the created instance */
-/* handle through phInstance. All parameters are set to their default, inactive state. */
+/* This function is used to create a bundle instance. */
+/* All parameters are set to their default, inactive state. */
/* */
/* PARAMETERS: */
-/* phInstance pointer to the instance handle */
-/* pMemoryTable Pointer to the memory definition table */
-/* pInstParams Pointer to the initialisation capabilities */
+/* phInstance Pointer to the instance handle */
+/* pInstParams Pointer to the instance parameters */
/* */
/* RETURNS: */
/* LVM_SUCCESS Initialisation succeeded */
+/* LVM_NULLADDRESS When phInstance or pInstParams is NULL, or an allocation fails */
/* LVM_OUTOFRANGE When any of the Instance parameters are out of range */
-/* LVM_NULLADDRESS When one of phInstance, pMemoryTable or pInstParams are NULL*/
/* */
/* NOTES: */
/* 1. This function must not be interrupted by the LVM_Process function */
/* */
/****************************************************************************************/
-
LVM_ReturnStatus_en LVM_GetInstanceHandle(LVM_Handle_t *phInstance,
- LVM_MemTab_t *pMemoryTable,
LVM_InstParams_t *pInstParams)
{
LVM_ReturnStatus_en Status = LVM_SUCCESS;
LVM_Instance_t *pInstance;
- INST_ALLOC AllocMem[LVM_NR_MEMORY_REGIONS];
LVM_INT16 i;
LVM_UINT16 InternalBlockSize;
LVM_INT32 BundleScratchSize;
@@ -508,24 +61,12 @@
/*
* Check valid points have been given
*/
- if ((phInstance == LVM_NULL) || (pMemoryTable == LVM_NULL) || (pInstParams == LVM_NULL))
+ if ((phInstance == LVM_NULL) || (pInstParams == LVM_NULL))
{
return (LVM_NULLADDRESS);
}
/*
- * Check the memory table for NULL pointers
- */
- for (i=0; i<LVM_NR_MEMORY_REGIONS; i++)
- {
- if ((pMemoryTable->Region[i].Size != 0) &&
- (pMemoryTable->Region[i].pBaseAddress==LVM_NULL))
- {
- return(LVM_NULLADDRESS);
- }
- }
-
- /*
* Check the instance parameters
*/
if( (pInstParams->BufferMode != LVM_MANAGED_BUFFERS) && (pInstParams->BufferMode != LVM_UNMANAGED_BUFFERS) )
@@ -559,29 +100,19 @@
}
/*
- * Initialise the AllocMem structures
+ * Create the instance handle
*/
- for (i=0; i<LVM_NR_MEMORY_REGIONS; i++)
+ *phInstance = (LVM_Handle_t)calloc(1, sizeof(*pInstance));
+ if (*phInstance == LVM_NULL)
{
- InstAlloc_Init(&AllocMem[i],
- pMemoryTable->Region[i].pBaseAddress);
+ return LVM_NULLADDRESS;
}
+ pInstance = (LVM_Instance_t *)*phInstance;
+
+ pInstance->InstParams = *pInstParams;
/*
- * Set the instance handle
- */
- *phInstance = (LVM_Handle_t)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- sizeof(LVM_Instance_t));
- pInstance =(LVM_Instance_t *)*phInstance;
-
- /*
- * Save the memory table, parameters and capabilities
- */
- pInstance->MemoryTable = *pMemoryTable;
- pInstance->InstParams = *pInstParams;
-
- /*
- * Set the bundle scratch memory and initialse the buffer management
+ * Create the bundle scratch memory and initialise the buffer management
*/
InternalBlockSize = (LVM_UINT16)((pInstParams->MaxBlockSize) & MIN_INTERNAL_BLOCKMASK); /* Force to a multiple of MIN_INTERNAL_BLOCKSIZE */
if (InternalBlockSize < MIN_INTERNAL_BLOCKSIZE)
@@ -600,23 +131,31 @@
* Common settings for managed and unmanaged buffers
*/
pInstance->SamplesToProcess = 0; /* No samples left to process */
+ BundleScratchSize = (LVM_INT32)
+ (3 * LVM_MAX_CHANNELS \
+ * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
+ * sizeof(LVM_FLOAT));
+ pInstance->pScratch = calloc(1, BundleScratchSize);
+ if (pInstance->pScratch == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
+
if (pInstParams->BufferMode == LVM_MANAGED_BUFFERS)
{
/*
* Managed buffers required
*/
pInstance->pBufferManagement = (LVM_Buffer_t *)
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- sizeof(LVM_Buffer_t));
- BundleScratchSize = (LVM_INT32)
- (3 * LVM_MAX_CHANNELS \
- * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
- * sizeof(LVM_FLOAT));
- pInstance->pBufferManagement->pScratch = (LVM_FLOAT *)
- InstAlloc_AddMember(
- &AllocMem[LVM_MEMREGION_TEMPORARY_FAST], /* Scratch 1 buffer */
- (LVM_UINT32)BundleScratchSize);
- LoadConst_Float(0, /* Clear the input delay buffer */
+ calloc(1, sizeof(*(pInstance->pBufferManagement)));
+ if (pInstance->pBufferManagement == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
+
+ pInstance->pBufferManagement->pScratch = (LVM_FLOAT *)pInstance->pScratch;
+
+ LoadConst_Float(0, /* Clear the input delay buffer */
(LVM_FLOAT *)&pInstance->pBufferManagement->InDelayBuffer,
(LVM_INT16)(LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE));
pInstance->pBufferManagement->InDelaySamples = MIN_INTERNAL_BLOCKSIZE; /* Set the number of delay samples */
@@ -642,20 +181,21 @@
/*
* DC removal filter
*/
-#ifdef SUPPORT_MC
DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-#else
- DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-#endif
/*
* Treble Enhancement
*/
- pInstance->pTE_Taps = (LVM_TE_Data_t *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- sizeof(LVM_TE_Data_t));
-
- pInstance->pTE_State = (LVM_TE_Coefs_t *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- sizeof(LVM_TE_Coefs_t));
+ pInstance->pTE_Taps = (LVM_TE_Data_t *)calloc(1, sizeof(*(pInstance->pTE_Taps)));
+ if (pInstance->pTE_Taps == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
+ pInstance->pTE_State = (LVM_TE_Coefs_t *)calloc(1, sizeof(*(pInstance->pTE_State)));
+ if (pInstance->pTE_State == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
pInstance->Params.TE_OperatingMode = LVM_TE_OFF;
pInstance->Params.TE_EffectLevel = 0;
pInstance->TE_Active = LVM_FALSE;
@@ -699,21 +239,26 @@
LVC_Mixer_VarSlope_SetTimeConstant(&pInstance->VC_BalanceMix.MixerStream[1],LVM_VC_MIXER_TIME,LVM_FS_8000,2);
/*
- * Set the default EQNB pre-gain and pointer to the band definitions
+ * Create the default EQNB pre-gain and pointer to the band definitions
*/
- pInstance->pEQNB_BandDefs =
- (LVM_EQNB_BandDef_t *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t)));
- pInstance->pEQNB_UserDefs =
- (LVM_EQNB_BandDef_t *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t)));
+ pInstance->pEQNB_BandDefs = (LVM_EQNB_BandDef_t *)
+ calloc(pInstParams->EQNB_NumBands, sizeof(*(pInstance->pEQNB_BandDefs)));
+ if (pInstance->pEQNB_BandDefs == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
+ pInstance->pEQNB_UserDefs = (LVM_EQNB_BandDef_t *)
+ calloc(pInstParams->EQNB_NumBands, sizeof(*(pInstance->pEQNB_UserDefs)));
+ if (pInstance->pEQNB_UserDefs == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
/*
* Initialise the Concert Sound module
*/
{
LVCS_Handle_t hCSInstance; /* Instance handle */
- LVCS_MemTab_t CS_MemTab; /* Memory table */
LVCS_Capabilities_t CS_Capabilities; /* Initial capabilities */
LVCS_ReturnStatus_en LVCS_Status; /* Function call status */
@@ -733,26 +278,12 @@
CS_Capabilities.pBundleInstance = (void*)pInstance;
/*
- * Get the memory requirements and then set the address pointers, forcing alignment
- */
- LVCS_Status = LVCS_Memory(LVM_NULL, /* Get the memory requirements */
- &CS_MemTab,
- &CS_Capabilities);
- CS_MemTab.Region[LVCS_MEMREGION_PERSISTENT_SLOW_DATA].pBaseAddress = &pInstance->CS_Instance;
- CS_MemTab.Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- CS_MemTab.Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].Size);
- CS_MemTab.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- CS_MemTab.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].Size);
- CS_MemTab.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],
- 0);
-
- /*
* Initialise the Concert Sound instance and save the instance handle
*/
hCSInstance = LVM_NULL; /* Set to NULL to return handle */
- LVCS_Status = LVCS_Init(&hCSInstance, /* Initiailse */
- &CS_MemTab,
- &CS_Capabilities);
+ LVCS_Status = LVCS_Init(&hCSInstance, /* Create and initialise */
+ &CS_Capabilities,
+ pInstance->pScratch);
if (LVCS_Status != LVCS_SUCCESS) return((LVM_ReturnStatus_en)LVCS_Status);
pInstance->hCSInstance = hCSInstance; /* Save the instance handle */
@@ -763,7 +294,6 @@
*/
{
LVDBE_Handle_t hDBEInstance; /* Instance handle */
- LVDBE_MemTab_t DBE_MemTab; /* Memory table */
LVDBE_Capabilities_t DBE_Capabilities; /* Initial capabilities */
LVDBE_ReturnStatus_en LVDBE_Status; /* Function call status */
@@ -787,30 +317,19 @@
LVDBE_CAP_FS_48000 | LVDBE_CAP_FS_88200 |
LVDBE_CAP_FS_96000 | LVDBE_CAP_FS_176400 |
LVDBE_CAP_FS_192000;
- DBE_Capabilities.CentreFrequency = LVDBE_CAP_CENTRE_55Hz | LVDBE_CAP_CENTRE_55Hz | LVDBE_CAP_CENTRE_66Hz | LVDBE_CAP_CENTRE_78Hz | LVDBE_CAP_CENTRE_90Hz;
- DBE_Capabilities.MaxBlockSize = (LVM_UINT16)InternalBlockSize;
- /*
- * Get the memory requirements and then set the address pointers
- */
- LVDBE_Status = LVDBE_Memory(LVM_NULL, /* Get the memory requirements */
- &DBE_MemTab,
- &DBE_Capabilities);
- DBE_MemTab.Region[LVDBE_MEMREGION_INSTANCE].pBaseAddress = &pInstance->DBE_Instance;
- DBE_MemTab.Region[LVDBE_MEMREGION_PERSISTENT_DATA].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- DBE_MemTab.Region[LVDBE_MEMREGION_PERSISTENT_DATA].Size);
- DBE_MemTab.Region[LVDBE_MEMREGION_PERSISTENT_COEF].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- DBE_MemTab.Region[LVDBE_MEMREGION_PERSISTENT_COEF].Size);
- DBE_MemTab.Region[LVDBE_MEMREGION_SCRATCH].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],
- 0);
+ DBE_Capabilities.CentreFrequency = LVDBE_CAP_CENTRE_55Hz | LVDBE_CAP_CENTRE_55Hz |
+ LVDBE_CAP_CENTRE_66Hz | LVDBE_CAP_CENTRE_78Hz |
+ LVDBE_CAP_CENTRE_90Hz;
+ DBE_Capabilities.MaxBlockSize = (LVM_UINT16)InternalBlockSize;
/*
* Initialise the Dynamic Bass Enhancement instance and save the instance handle
*/
hDBEInstance = LVM_NULL; /* Set to NULL to return handle */
- LVDBE_Status = LVDBE_Init(&hDBEInstance, /* Initiailse */
- &DBE_MemTab,
- &DBE_Capabilities);
+ LVDBE_Status = LVDBE_Init(&hDBEInstance, /* Create and initialise */
+ &DBE_Capabilities,
+ pInstance->pScratch);
if (LVDBE_Status != LVDBE_SUCCESS) return((LVM_ReturnStatus_en)LVDBE_Status);
pInstance->hDBEInstance = hDBEInstance; /* Save the instance handle */
}
@@ -820,7 +339,6 @@
*/
{
LVEQNB_Handle_t hEQNBInstance; /* Instance handle */
- LVEQNB_MemTab_t EQNB_MemTab; /* Memory table */
LVEQNB_Capabilities_t EQNB_Capabilities; /* Initial capabilities */
LVEQNB_ReturnStatus_en LVEQNB_Status; /* Function call status */
@@ -842,6 +360,7 @@
LVEQNB_CAP_FS_48000 | LVEQNB_CAP_FS_88200 |
LVEQNB_CAP_FS_96000 | LVEQNB_CAP_FS_176400 |
LVEQNB_CAP_FS_192000;
+
EQNB_Capabilities.MaxBlockSize = (LVM_UINT16)InternalBlockSize;
EQNB_Capabilities.MaxBands = pInstParams->EQNB_NumBands;
EQNB_Capabilities.SourceFormat = LVEQNB_CAP_STEREO | LVEQNB_CAP_MONOINSTEREO;
@@ -849,26 +368,12 @@
EQNB_Capabilities.pBundleInstance = (void*)pInstance;
/*
- * Get the memory requirements and then set the address pointers, forcing alignment
- */
- LVEQNB_Status = LVEQNB_Memory(LVM_NULL, /* Get the memory requirements */
- &EQNB_MemTab,
- &EQNB_Capabilities);
- EQNB_MemTab.Region[LVEQNB_MEMREGION_INSTANCE].pBaseAddress = &pInstance->EQNB_Instance;
- EQNB_MemTab.Region[LVEQNB_MEMREGION_PERSISTENT_DATA].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- EQNB_MemTab.Region[LVEQNB_MEMREGION_PERSISTENT_DATA].Size);
- EQNB_MemTab.Region[LVEQNB_MEMREGION_PERSISTENT_COEF].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- EQNB_MemTab.Region[LVEQNB_MEMREGION_PERSISTENT_COEF].Size);
- EQNB_MemTab.Region[LVEQNB_MEMREGION_SCRATCH].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],
- 0);
-
- /*
* Initialise the Dynamic Bass Enhancement instance and save the instance handle
*/
hEQNBInstance = LVM_NULL; /* Set to NULL to return handle */
- LVEQNB_Status = LVEQNB_Init(&hEQNBInstance, /* Initiailse */
- &EQNB_MemTab,
- &EQNB_Capabilities);
+ LVEQNB_Status = LVEQNB_Init(&hEQNBInstance, /* Create and initialise */
+ &EQNB_Capabilities,
+ pInstance->pScratch);
if (LVEQNB_Status != LVEQNB_SUCCESS) return((LVM_ReturnStatus_en)LVEQNB_Status);
pInstance->hEQNBInstance = hEQNBInstance; /* Save the instance handle */
}
@@ -878,11 +383,17 @@
*/
{
pInstance->pHeadroom_BandDefs = (LVM_HeadroomBandDef_t *)
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t)));
+ calloc(LVM_HEADROOM_MAX_NBANDS, sizeof(*(pInstance->pHeadroom_BandDefs)));
+ if (pInstance->pHeadroom_BandDefs == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
pInstance->pHeadroom_UserDefs = (LVM_HeadroomBandDef_t *)
- InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- (LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t)));
+ calloc(LVM_HEADROOM_MAX_NBANDS, sizeof(*(pInstance->pHeadroom_UserDefs)));
+ if (pInstance->pHeadroom_UserDefs == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
/* Headroom management parameters initialisation */
pInstance->NewHeadroomParams.NHeadroomBands = 2;
@@ -903,7 +414,6 @@
*/
{
pLVPSA_Handle_t hPSAInstance = LVM_NULL; /* Instance handle */
- LVPSA_MemTab_t PSA_MemTab;
LVPSA_RETURN PSA_Status; /* Function call status */
LVPSA_FilterParam_t FiltersParams[9];
@@ -920,41 +430,18 @@
FiltersParams[i].PostGain = (LVM_INT16) 0;
}
- /*Get the memory requirements and then set the address pointers*/
- PSA_Status = LVPSA_Memory (hPSAInstance,
- &PSA_MemTab,
- &pInstance->PSA_InitParams);
-
- if (PSA_Status != LVPSA_OK)
- {
- return((LVM_ReturnStatus_en) LVM_ALGORITHMPSA);
- }
-
- /* Slow Data */
- PSA_MemTab.Region[LVM_PERSISTENT_SLOW_DATA].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
- PSA_MemTab.Region[LVM_PERSISTENT_SLOW_DATA].Size);
-
- /* Fast Data */
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_DATA].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_DATA],
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_DATA].Size);
-
- /* Fast Coef */
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_COEF].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_FAST_COEF],
- PSA_MemTab.Region[LVM_PERSISTENT_FAST_COEF].Size);
-
- /* Fast Temporary */
- pInstance->pPSAInput = (LVM_FLOAT *)InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
- (LVM_UINT32) MAX_INTERNAL_BLOCKSIZE * \
- sizeof(LVM_FLOAT));
- PSA_MemTab.Region[LVM_TEMPORARY_FAST].pBaseAddress = (void *)InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST],0);
-
/*Initialise PSA instance and save the instance handle*/
pInstance->PSA_ControlParams.Fs = LVM_FS_48000;
pInstance->PSA_ControlParams.LevelDetectionSpeed = LVPSA_SPEED_MEDIUM;
+ pInstance->pPSAInput = (LVM_FLOAT *)calloc(MAX_INTERNAL_BLOCKSIZE, sizeof(LVM_FLOAT));
+ if (pInstance->pPSAInput == LVM_NULL)
+ {
+ return LVM_NULLADDRESS;
+ }
PSA_Status = LVPSA_Init (&hPSAInstance,
&pInstance->PSA_InitParams,
&pInstance->PSA_ControlParams,
- &PSA_MemTab);
+ pInstance->pScratch);
if (PSA_Status != LVPSA_OK)
{
@@ -1007,6 +494,111 @@
return(Status);
}
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVM_DelInstanceHandle */
+/* */
+/* DESCRIPTION: */
+/* This function is used to free a bundle instance created by LVM_GetInstanceHandle. */
+/* It releases the instance memory and all memory allocated internally for it. */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to the instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVM_Process function */
+/* */
+/****************************************************************************************/
+void LVM_DelInstanceHandle(LVM_Handle_t *phInstance)
+{
+ LVM_Instance_t *pInstance = (LVM_Instance_t *)*phInstance;
+
+ if (pInstance->pScratch != LVM_NULL) {
+ free(pInstance->pScratch);
+ pInstance->pScratch = LVM_NULL;
+ }
+
+ if (pInstance->InstParams.BufferMode == LVM_MANAGED_BUFFERS) {
+ /*
+ * Managed buffers required
+ */
+ if (pInstance->pBufferManagement != LVM_NULL) {
+ free(pInstance->pBufferManagement);
+ pInstance->pBufferManagement = LVM_NULL;
+ }
+ }
+
+ /*
+ * Treble Enhancement
+ */
+ if (pInstance->pTE_Taps != LVM_NULL) {
+ free(pInstance->pTE_Taps);
+ pInstance->pTE_Taps = LVM_NULL;
+ }
+ if (pInstance->pTE_State != LVM_NULL) {
+ free(pInstance->pTE_State);
+ pInstance->pTE_State = LVM_NULL;
+ }
+
+ /*
+ * Free the default EQNB pre-gain and pointer to the band definitions
+ */
+ if (pInstance->pEQNB_BandDefs != LVM_NULL) {
+ free(pInstance->pEQNB_BandDefs);
+ pInstance->pEQNB_BandDefs = LVM_NULL;
+ }
+ if (pInstance->pEQNB_UserDefs != LVM_NULL) {
+ free(pInstance->pEQNB_UserDefs);
+ pInstance->pEQNB_UserDefs = LVM_NULL;
+ }
+
+ /*
+ * De-initialise the Concert Sound module
+ */
+ if (pInstance->hCSInstance != LVM_NULL) {
+ LVCS_DeInit(&pInstance->hCSInstance);
+ }
+
+ /*
+ * De-initialise the Bass Enhancement module
+ */
+ if (pInstance->hDBEInstance != LVM_NULL) {
+ LVDBE_DeInit(&pInstance->hDBEInstance);
+ }
+
+ /*
+ * De-initialise the N-Band Equaliser module
+ */
+ if (pInstance->hEQNBInstance != LVM_NULL) {
+ LVEQNB_DeInit(&pInstance->hEQNBInstance);
+ }
+
+ /*
+ * Free Headroom management memory.
+ */
+ if (pInstance->pHeadroom_BandDefs != LVM_NULL) {
+ free(pInstance->pHeadroom_BandDefs);
+ pInstance->pHeadroom_BandDefs = LVM_NULL;
+ }
+ if (pInstance->pHeadroom_UserDefs != LVM_NULL) {
+ free(pInstance->pHeadroom_UserDefs);
+ pInstance->pHeadroom_UserDefs = LVM_NULL;
+ }
+
+ /*
+ * De-initialise the PSA module
+ */
+ if (pInstance->hPSAInstance != LVM_NULL) {
+ LVPSA_DeInit(&pInstance->hPSAInstance);
+ }
+ if (pInstance->pPSAInput != LVM_NULL) {
+ free(pInstance->pPSAInput);
+ pInstance->pPSAInput = LVM_NULL;
+ }
+
+ free(*phInstance);
+ return;
+}
/****************************************************************************************/
/* */
@@ -1029,7 +621,6 @@
LVM_ReturnStatus_en LVM_ClearAudioBuffers(LVM_Handle_t hInstance)
{
- LVM_MemTab_t MemTab; /* Memory table */
LVM_InstParams_t InstParams; /* Instance parameters */
LVM_ControlParams_t Params; /* Control Parameters */
LVM_Instance_t *pInstance = (LVM_Instance_t *)hInstance; /* Pointer to Instance */
@@ -1045,17 +636,11 @@
/*Save the headroom parameters*/
LVM_GetHeadroomParams(hInstance, &HeadroomParams);
- /* Retrieve allocated buffers in memtab */
- LVM_GetMemoryTable(hInstance, &MemTab, LVM_NULL);
/* Save the instance parameters */
InstParams = pInstance->InstParams;
/* Call LVM_GetInstanceHandle to re-initialise the bundle */
- LVM_GetInstanceHandle( &hInstance,
- &MemTab,
- &InstParams);
-
/* Restore control parameters */ /* coverity[unchecked_value] */ /* Do not check return value internal function calls */
LVM_SetControlParameters(hInstance, &Params);
@@ -1063,11 +648,7 @@
LVM_SetHeadroomParams(hInstance, &HeadroomParams);
/* DC removal filter */
-#ifdef SUPPORT_MC
DC_Mc_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-#else
- DC_2I_D16_TRC_WRA_01_Init(&pInstance->DC_RemovalInstance);
-#endif
return LVM_SUCCESS;
}
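Taken together, the LVM_Init.cpp changes drop the caller-supplied memory tables: LVM_GetInstanceHandle now takes only the instance parameters and allocates everything internally with calloc, the bundle scratch is created once and handed to each sub-module's Init, and the new LVM_DelInstanceHandle releases it all. A minimal client-side sketch of the new lifecycle (parameter values are illustrative):

    #include "LVM.h"   // bundle public header (assumed include path)

    // Create a bundle instance with the two-argument signature introduced above.
    LVM_Handle_t create_bundle(void) {
        LVM_InstParams_t InstParams;
        InstParams.BufferMode    = LVM_MANAGED_BUFFERS;
        InstParams.MaxBlockSize  = 2048;        // illustrative block size
        InstParams.EQNB_NumBands = 5;           // illustrative band count (<= 32)
        InstParams.PSA_Included  = LVM_PSA_ON;

        LVM_Handle_t hInstance = LVM_NULL;
        if (LVM_GetInstanceHandle(&hInstance, &InstParams) != LVM_SUCCESS) {
            return LVM_NULL;                    // parameter error or allocation failure
        }
        return hInstance;
    }

    // Destroy it with the new counterpart, which frees scratch and sub-module memory.
    void destroy_bundle(LVM_Handle_t hInstance) {
        if (hInstance != LVM_NULL) {
            LVM_DelInstanceHandle(&hInstance);
        }
    }

Note the design choice visible in the hunks: the single pInstance->pScratch buffer is passed to LVCS_Init, LVDBE_Init, LVEQNB_Init and LVPSA_Init, so the sub-modules share one temporary workspace instead of carving regions out of a caller-provided memory table.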
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
index ddaac99..a9492a1 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
@@ -113,40 +113,15 @@
/* */
/************************************************************************************/
-/* Memory region definition */
-typedef struct
-{
- LVM_UINT32 Size; /* Region size in bytes */
- LVM_UINT16 Alignment; /* Byte alignment */
- LVM_MemoryTypes_en Type; /* Region type */
- void *pBaseAddress; /* Pointer to the region base address */
-} LVM_IntMemoryRegion_t;
-
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVM_IntMemoryRegion_t Region[LVM_NR_MEMORY_REGIONS]; /* One definition for each region */
-} LVM_IntMemTab_t;
-
/* Buffer Management */
typedef struct
{
LVM_FLOAT *pScratch; /* Bundle scratch buffer */
LVM_INT16 BufferState; /* Buffer status */
-#ifdef SUPPORT_MC
LVM_FLOAT InDelayBuffer[3 * LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
-#else
- LVM_FLOAT InDelayBuffer[6 * MIN_INTERNAL_BLOCKSIZE]; /* Input buffer delay line, \
- left and right */
-#endif
LVM_INT16 InDelaySamples; /* Number of samples in the input delay buffer */
-#ifdef SUPPORT_MC
LVM_FLOAT OutDelayBuffer[LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE];
-#else
- LVM_FLOAT OutDelayBuffer[2 * MIN_INTERNAL_BLOCKSIZE]; /* Output buffer delay \
- line */
-#endif
LVM_INT16 OutDelaySamples; /* Number of samples in the output delay buffer, \
left and right */
LVM_INT16 SamplesToOutput; /* Samples to write to the output */
@@ -167,7 +142,6 @@
typedef struct
{
/* Public parameters */
- LVM_MemTab_t MemoryTable; /* Instance memory allocation table */
LVM_ControlParams_t Params; /* Control parameters */
LVM_InstParams_t InstParams; /* Instance parameters */
@@ -236,10 +210,9 @@
LVM_INT16 NoSmoothVolume; /* Enable or disable smooth volume changes*/
-#ifdef SUPPORT_MC
LVM_INT16 NrChannels;
LVM_INT32 ChMask;
-#endif
+ void *pScratch; /* Pointer to bundle scratch buffer */
} LVM_Instance_t;
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
index dc86cfd..3af2327 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
@@ -64,11 +64,9 @@
LVM_FLOAT *pToProcess = (LVM_FLOAT *)pInData;
LVM_FLOAT *pProcessed = pOutData;
LVM_ReturnStatus_en Status;
-#ifdef SUPPORT_MC
LVM_INT32 NrChannels = pInstance->NrChannels;
LVM_INT32 ChMask = pInstance->ChMask;
#define NrFrames SampleCount // alias for clarity
-#endif
/*
* Check if the number of samples is zero
@@ -114,11 +112,9 @@
if (pInstance->ControlPending == LVM_TRUE)
{
Status = LVM_ApplyNewSettings(hInstance);
-#ifdef SUPPORT_MC
/* Update the local variable NrChannels from pInstance->NrChannels value */
NrChannels = pInstance->NrChannels;
ChMask = pInstance->ChMask;
-#endif
if(Status != LVM_SUCCESS)
{
@@ -136,10 +132,8 @@
(LVM_INT16)NumSamples); /* Number of input samples */
pInput = pOutData;
pToProcess = pOutData;
-#ifdef SUPPORT_MC
NrChannels = 2;
ChMask = AUDIO_CHANNEL_OUT_STEREO;
-#endif
}
/*
@@ -179,18 +173,11 @@
*/
if (pInstance->VC_Active!=0)
{
-#ifdef SUPPORT_MC
LVC_MixSoft_Mc_D16C31_SAT(&pInstance->VC_Volume,
pToProcess,
pProcessed,
(LVM_INT16)(NrFrames),
NrChannels);
-#else
- LVC_MixSoft_1St_D16C31_SAT(&pInstance->VC_Volume,
- pToProcess,
- pProcessed,
- (LVM_INT16)(2 * SampleCount)); /* Left and right*/
-#endif
pToProcess = pProcessed;
}
@@ -224,15 +211,9 @@
*/
if (pToProcess != pProcessed)
{
-#ifdef SUPPORT_MC
Copy_Float(pToProcess, /* Source */
pProcessed, /* Destination */
(LVM_INT16)(NrChannels * NrFrames)); /* Copy all samples */
-#else
- Copy_Float(pToProcess, /* Source */
- pProcessed, /* Destination */
- (LVM_INT16)(2 * SampleCount)); /* Left and right */
-#endif
}
/*
@@ -243,21 +224,13 @@
/*
* Apply the filter
*/
-#ifdef SUPPORT_MC
FO_Mc_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
pProcessed,
pProcessed,
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- FO_2I_D16F32C15_LShx_TRC_WRA_01(&pInstance->pTE_State->TrebleBoost_State,
- pProcessed,
- pProcessed,
- (LVM_INT16)SampleCount);
-#endif
}
-#ifdef SUPPORT_MC
/*
* Volume balance
*/
@@ -267,15 +240,6 @@
NrFrames,
NrChannels,
ChMask);
-#else
- /*
- * Volume balance
- */
- LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
- pProcessed,
- pProcessed,
- SampleCount);
-#endif
/*
* Perform Parametric Spectum Analysis
@@ -283,16 +247,10 @@
if ((pInstance->Params.PSA_Enable == LVM_PSA_ON) &&
(pInstance->InstParams.PSA_Included == LVM_PSA_ON))
{
-#ifdef SUPPORT_MC
FromMcToMono_Float(pProcessed,
pInstance->pPSAInput,
(LVM_INT16)(NrFrames),
NrChannels);
-#else
- From2iToMono_Float(pProcessed,
- pInstance->pPSAInput,
- (LVM_INT16)(SampleCount));
-#endif
LVPSA_Process(pInstance->hPSAInstance,
pInstance->pPSAInput,
@@ -303,18 +261,11 @@
/*
* DC removal
*/
-#ifdef SUPPORT_MC
DC_Mc_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
pProcessed,
pProcessed,
(LVM_INT16)NrFrames,
NrChannels);
-#else
- DC_2I_D16_TRC_WRA_01(&pInstance->DC_RemovalInstance,
- pProcessed,
- pProcessed,
- (LVM_INT16)SampleCount);
-#endif
}
/*
* Manage the output buffer
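In LVM_Process.cpp the per-effect calls now always take a frame count plus a channel count (the NrFrames alias of SampleCount above), so the in-place buffers hold NrChannels * NrFrames interleaved floats. A small sketch of sizing such a buffer under that assumption:

    #include <vector>

    // Interleaved float buffer: one sample per channel per frame, channels
    // interleaved within each frame, matching the NrChannels * NrFrames copies above.
    std::vector<float> make_interleaved_buffer(int nrFrames, int nrChannels) {
        return std::vector<float>(static_cast<std::size_t>(nrFrames) * nrChannels, 0.0f);
    }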
diff --git a/media/libeffects/lvm/lib/Common/lib/AGC.h b/media/libeffects/lvm/lib/Common/lib/AGC.h
index bef7fa1..6160452 100644
--- a/media/libeffects/lvm/lib/Common/lib/AGC.h
+++ b/media/libeffects/lvm/lib/Common/lib/AGC.h
@@ -54,14 +54,12 @@
const LVM_FLOAT *pMonoSrc, /* Mono source */
LVM_FLOAT *pDst, /* Stereo destination */
LVM_UINT16 n); /* Number of samples */
-#ifdef SUPPORT_MC
void AGC_MIX_VOL_Mc1Mon_D32_WRA(AGC_MIX_VOL_2St1Mon_FLOAT_t *pInstance, /* Instance pointer */
const LVM_FLOAT *pStSrc, /* Source */
const LVM_FLOAT *pMonoSrc, /* Mono source */
LVM_FLOAT *pDst, /* Destination */
LVM_UINT16 NrFrames, /* Number of frames */
LVM_UINT16 NrChannels); /* Number of channels */
-#endif
#endif /* __AGC_H__ */
diff --git a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
index c050cd0..b1eefb1 100644
--- a/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
+++ b/media/libeffects/lvm/lib/Common/lib/BIQUAD.h
@@ -24,7 +24,6 @@
***********************************************************************************/
typedef struct
{
-#ifdef SUPPORT_MC
/* The memory region created by this structure instance is typecast
* into another structure containing a pointer and an array of filter
* coefficients. In one case this memory region is used for storing
@@ -32,9 +31,6 @@
*/
LVM_FLOAT *pStorage;
LVM_FLOAT Storage[LVM_MAX_CHANNELS];
-#else
- LVM_FLOAT Storage[6];
-#endif
} Biquad_FLOAT_Instance_t;
/**********************************************************************************
COEFFICIENT TYPE DEFINITIONS
@@ -94,12 +90,8 @@
typedef struct
{
-#ifdef SUPPORT_MC
/* LVM_MAX_CHANNELS channels, two taps of size LVM_FLOAT */
LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 2) ];
-#else
- LVM_FLOAT Storage[ (2 * 2) ]; /* Two channels, two taps of size LVM_FLOAT */
-#endif
} Biquad_2I_Order1_FLOAT_Taps_t;
/*** Types used for biquad, band pass and peaking filter **************************/
@@ -110,12 +102,8 @@
typedef struct
{
-#ifdef SUPPORT_MC
/* LVM_MAX_CHANNELS, four taps of size LVM_FLOAT */
LVM_FLOAT Storage[ (LVM_MAX_CHANNELS * 4) ];
-#else
- LVM_FLOAT Storage[ (2 * 4) ]; /* Two channels, four taps of size LVM_FLOAT */
-#endif
} Biquad_2I_Order2_FLOAT_Taps_t;
/* The names of the functions are changed to satisfy QAC rules: Name should be Unique withing 16 characters*/
#define BQ_2I_D32F32Cll_TRC_WRA_01_Init Init_BQ_2I_D32F32Cll_TRC_WRA_01
@@ -185,13 +173,11 @@
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrSamples);
-#ifdef SUPPORT_MC
void BQ_MC_D32F32C30_TRC_WRA_01 ( Biquad_FLOAT_Instance_t *pInstance,
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
/**********************************************************************************
FUNCTION PROTOTYPES: FIRST ORDER FILTERS
@@ -223,13 +209,11 @@
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrSamples);
-#ifdef SUPPORT_MC
void FO_Mc_D16F32C15_LShx_TRC_WRA_01(Biquad_FLOAT_Instance_t *pInstance,
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
/**********************************************************************************
FUNCTION PROTOTYPES: BAND PASS FILTERS
***********************************************************************************/
@@ -266,20 +250,17 @@
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrSamples);
-#ifdef SUPPORT_MC
void PK_Mc_D32F32C14G11_TRC_WRA_01(Biquad_FLOAT_Instance_t *pInstance,
LVM_FLOAT *pDataIn,
LVM_FLOAT *pDataOut,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
/**********************************************************************************
FUNCTION PROTOTYPES: DC REMOVAL FILTERS
***********************************************************************************/
/*** 16 bit data path STEREO ******************************************************/
-#ifdef SUPPORT_MC
void DC_Mc_D16_TRC_WRA_01_Init ( Biquad_FLOAT_Instance_t *pInstance);
void DC_Mc_D16_TRC_WRA_01 ( Biquad_FLOAT_Instance_t *pInstance,
@@ -287,14 +268,6 @@
LVM_FLOAT *pDataOut,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#else
-void DC_2I_D16_TRC_WRA_01_Init ( Biquad_FLOAT_Instance_t *pInstance);
-
-void DC_2I_D16_TRC_WRA_01 ( Biquad_FLOAT_Instance_t *pInstance,
- LVM_FLOAT *pDataIn,
- LVM_FLOAT *pDataOut,
- LVM_INT16 NrSamples);
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index 8b687f6..008d192 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -54,26 +54,6 @@
#define LVM_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-/* Memory partition type */
-#define LVM_MEM_PARTITION0 0 /* 1st memory partition */
-#define LVM_MEM_PARTITION1 1 /* 2nd memory partition */
-#define LVM_MEM_PARTITION2 2 /* 3rd memory partition */
-#define LVM_MEM_PARTITION3 3 /* 4th memory partition */
-
-/* Use type */
-#define LVM_MEM_PERSISTENT 0 /* Persistent memory type */
-#define LVM_MEM_SCRATCH 4 /* Scratch memory type */
-
-/* Access type */
-#define LVM_MEM_INTERNAL 0 /* Internal (fast) access memory */
-#define LVM_MEM_EXTERNAL 8 /* External (slow) access memory */
-
-/* Platform specific */
-#define LVM_PERSISTENT (LVM_MEM_PARTITION0+LVM_MEM_PERSISTENT+LVM_MEM_INTERNAL)
-#define LVM_PERSISTENT_DATA (LVM_MEM_PARTITION1+LVM_MEM_PERSISTENT+LVM_MEM_INTERNAL)
-#define LVM_PERSISTENT_COEF (LVM_MEM_PARTITION2+LVM_MEM_PERSISTENT+LVM_MEM_INTERNAL)
-#define LVM_SCRATCH (LVM_MEM_PARTITION3+LVM_MEM_SCRATCH+LVM_MEM_INTERNAL)
-
/****************************************************************************************/
/* */
/* Basic types */
@@ -102,11 +82,7 @@
typedef float effect_buffer_t;
-#ifdef SUPPORT_MC
#define LVM_MAX_CHANNELS 8 // FCC_8
-#else
-#define LVM_MAX_CHANNELS 2 // FCC_2
-#endif
/****************************************************************************************/
/* */
@@ -128,9 +104,7 @@
LVM_STEREO = 0,
LVM_MONOINSTEREO = 1,
LVM_MONO = 2,
-#ifdef SUPPORT_MC
LVM_MULTICHANNEL = 3,
-#endif
LVM_SOURCE_DUMMY = LVM_MAXENUM
} LVM_Format_en;
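LVM_MAX_CHANNELS is now fixed at 8 and LVM_MULTICHANNEL is always part of LVM_Format_en. A hedged sketch of deriving the NrChannels value from an Android output channel mask (audio_channel_count_from_out_mask is the usual framework helper; using it here is an assumption about the caller, not something this change requires):

    #include <system/audio.h>   // audio_channel_* helpers (assumed available to the caller)

    // Clamp the channel count from an output mask to the new LVM_MAX_CHANNELS (8) limit.
    static inline int nr_channels_from_mask(audio_channel_mask_t mask) {
        int n = static_cast<int>(audio_channel_count_from_out_mask(mask));
        return (n > 8) ? 8 : n;   // LVM_MAX_CHANNELS == 8 per the hunk above
    }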
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index b27bac5..cbde91d 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -31,7 +31,6 @@
void Copy_Float( const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n );
-#ifdef SUPPORT_MC
void Copy_Float_Mc_Stereo( const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
@@ -41,7 +40,6 @@
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
-#endif
/*********************************************************************************
* note: In Mult3s_16x16() saturation of result is not taken care when *
@@ -110,12 +108,10 @@
void From2iToMono_Float( const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n);
-#ifdef SUPPORT_MC
void FromMcToMono_Float(const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void MSTo2i_Sat_Float( const LVM_FLOAT *srcM,
const LVM_FLOAT *srcS,
LVM_FLOAT *dst,
diff --git a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.cpp b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.cpp
index e18aa78..07fc0d1 100644
--- a/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.cpp
+++ b/media/libeffects/lvm/lib/Common/src/AGC_MIX_VOL_2St1Mon_D32_WRA.cpp
@@ -172,7 +172,6 @@
return;
}
-#ifdef SUPPORT_MC
/****************************************************************************************/
/* */
/* FUNCTION: AGC_MIX_VOL_Mc1Mon_D32_WRA */
@@ -314,4 +313,3 @@
return;
}
-#endif /*SUPPORT_MC*/
diff --git a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.cpp b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.cpp
index 78d1ba1..189fb9e 100644
--- a/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.cpp
+++ b/media/libeffects/lvm/lib/Common/src/BQ_2I_D32F32C30_TRC_WRA_01.cpp
@@ -120,7 +120,6 @@
}
-#ifdef SUPPORT_MC
/**************************************************************************
ASSUMPTIONS:
COEFS-
@@ -197,5 +196,4 @@
}
}
-#endif /*SUPPORT_MC*/
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.cpp b/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
index 3a50554..4b44f28 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
@@ -83,7 +83,6 @@
return;
}
-#ifdef SUPPORT_MC
// Extract out the stereo channel pair from multichannel source.
void Copy_Float_Mc_Stereo(const LVM_FLOAT *src,
LVM_FLOAT *dst,
@@ -143,5 +142,4 @@
StereoOut -= 2;
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.cpp b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.cpp
index a7ce4d3..f2b5813 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.cpp
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01.cpp
@@ -61,7 +61,6 @@
pBiquadState->RightDC = RightDC;
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: DC_Mc_D16_TRC_WRA_01
*
@@ -112,4 +111,3 @@
}
}
-#endif
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp
index beee112..42d98f2 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp
@@ -23,7 +23,6 @@
pBiquadState->LeftDC = 0.0f;
pBiquadState->RightDC = 0.0f;
}
-#ifdef SUPPORT_MC
void DC_Mc_D16_TRC_WRA_01_Init(Biquad_FLOAT_Instance_t *pInstance)
{
PFilter_FLOAT_State_Mc pBiquadState = (PFilter_FLOAT_State_Mc) pInstance;
@@ -33,4 +32,3 @@
pBiquadState->ChDC[i] = 0.0f;
}
}
-#endif
diff --git a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
index 4170b3c..999abea 100644
--- a/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/DC_2I_D16_TRC_WRA_01_Private.h
@@ -28,11 +28,9 @@
LVM_FLOAT RightDC; /* RightDC */
}Filter_FLOAT_State;
typedef Filter_FLOAT_State * PFilter_FLOAT_State ;
-#ifdef SUPPORT_MC
typedef struct _Filter_FLOAT_State_Mc_
{
LVM_FLOAT ChDC[LVM_MAX_CHANNELS]; /* ChannelDC */
} Filter_FLOAT_State_Mc;
typedef Filter_FLOAT_State_Mc * PFilter_FLOAT_State_Mc ;
-#endif
#endif /* _DC_2I_D16_TRC_WRA_01_PRIVATE_H_ */
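The DC-removal state is now always the multichannel variant, with one running DC estimate per channel (ChDC[LVM_MAX_CHANNELS]). The filter body of DC_Mc_D16_TRC_WRA_01 is not part of this diff, so the following is only a generic per-channel DC-blocker sketch that shows the shape of that state, not the library's exact coefficients or update rule:

    // Generic one-pole DC blocker over interleaved frames (illustrative only).
    static void dc_block_mc(float* data, int nrFrames, int nrChannels,
                            float* chDC /* per-channel running DC estimate */) {
        const float alpha = 0.001f;                   // illustrative smoothing constant
        for (int f = 0; f < nrFrames; ++f) {
            for (int ch = 0; ch < nrChannels; ++ch) {
                float& x = data[f * nrChannels + ch];
                chDC[ch] += alpha * (x - chDC[ch]);   // track the DC estimate
                x        -= chDC[ch];                 // and remove it from the sample
            }
        }
    }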
diff --git a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.cpp b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.cpp
index 6ca819a..605932d 100644
--- a/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.cpp
+++ b/media/libeffects/lvm/lib/Common/src/FO_2I_D16F32C15_LShx_TRC_WRA_01.cpp
@@ -113,7 +113,6 @@
}
}
-#ifdef SUPPORT_MC
/**************************************************************************
ASSUMPTIONS:
COEFS-
@@ -195,4 +194,3 @@
pDelays -= NrChannels * 2;
}
}
-#endif
diff --git a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.cpp b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.cpp
index a8688b4..6b52feb 100644
--- a/media/libeffects/lvm/lib/Common/src/From2iToMono_32.cpp
+++ b/media/libeffects/lvm/lib/Common/src/From2iToMono_32.cpp
@@ -67,7 +67,6 @@
return;
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: FromMcToMono_Float
*
@@ -107,6 +106,5 @@
return;
}
-#endif
/**********************************************************************************/
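FromMcToMono_Float is now always built; LVM_Process.cpp uses it to feed the spectrum analyser a mono downmix of the processed multichannel signal. Its implementation is not shown in this diff, so the following equal-weight average is only an assumption-level sketch with the same frame/channel conventions:

    // Average all channels of each interleaved frame into a single mono sample.
    static void mc_to_mono(const float* src, float* dst,
                           int nrFrames, int nrChannels) {
        for (int f = 0; f < nrFrames; ++f) {
            float acc = 0.0f;
            for (int ch = 0; ch < nrChannels; ++ch) {
                acc += src[f * nrChannels + ch];
            }
            dst[f] = acc / nrChannels;   // equal-weight downmix (assumed scaling)
        }
    }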
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.cpp
index 14d61bd..d4f42de 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.cpp
@@ -56,7 +56,6 @@
}
}
-#ifdef SUPPORT_MC
void LVC_Core_MixHard_1St_MC_float_SAT (Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
@@ -80,5 +79,4 @@
}
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.cpp
index 318138d..7d13d5c 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixInSoft_D16C31_SAT.cpp
@@ -113,7 +113,6 @@
}
pInstance->Current = Current;
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: LVC_Core_MixInSoft_Mc_D16C31_SAT
*
@@ -245,5 +244,4 @@
pInstance->Current = Current;
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.cpp b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.cpp
index 1f4b08a..784f339 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.cpp
@@ -145,7 +145,6 @@
pInstanceR->Current = CurrentR;
}
-#ifdef SUPPORT_MC
void LVC_Core_MixSoft_1St_MC_float_WRA (Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
@@ -189,5 +188,4 @@
ptrInstance[ch]->Current = tempCurrent[ch];
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.cpp b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.cpp
index 5d8aadc..57f037e 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_D16C31_WRA.cpp
@@ -105,7 +105,6 @@
pInstance->Current=Current;
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: LVC_Core_MixSoft_Mc_D16C31_WRA
*
@@ -214,6 +213,5 @@
}
pInstance->Current=Current;
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.cpp
index 2bec3be..ede6dee 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixInSoft_D16C31_SAT.cpp
@@ -105,7 +105,6 @@
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: LVC_MixInSoft_Mc_D16C31_SAT
*
@@ -202,6 +201,5 @@
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
index 3153ada..8fced60 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
@@ -37,7 +37,6 @@
/**********************************************************************************
FUNCTION LVC_MixSoft_1St_2i_D16C31_SAT
***********************************************************************************/
-#ifdef SUPPORT_MC
/* This threshold is used to decide on the processing to be applied on
* front center and back center channels
*/
@@ -231,7 +230,6 @@
}
}
}
-#endif
void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
index 4d229da..f893919 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
@@ -102,7 +102,6 @@
}
}
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: LVC_MixSoft_Mc_D16C31_SAT
*
@@ -195,6 +194,5 @@
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.cpp
index 54ab79d..2958637 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_2St_D16C31_SAT.cpp
@@ -67,7 +67,6 @@
}
}
-#ifdef SUPPORT_MC
/*
* FUNCTION: LVC_MixSoft_2Mc_D16C31_SAT
*
@@ -128,6 +127,5 @@
src1, src2, dst, NrFrames * NrChannels);
}
}
-#endif
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
index ce42d2e..6206273 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
@@ -88,53 +88,45 @@
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n);
-#ifdef SUPPORT_MC
void LVC_MixSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_MixInSoft_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n);
-#ifdef SUPPORT_MC
void LVC_MixInSoft_Mc_D16C31_SAT(LVMixer3_1St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_MixSoft_2St_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src1,
const LVM_FLOAT *src2,
LVM_FLOAT *dst, /* dst cannot be equal to src2 */
LVM_INT16 n);
-#ifdef SUPPORT_MC
void LVC_MixSoft_2Mc_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src1,
const LVM_FLOAT *src2,
LVM_FLOAT *dst, /* dst cannot be equal to src2 */
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
/**********************************************************************************/
/* For applying different gains to Left and right chennals */
/* MixerStream[0] applies to Left channel */
/* MixerStream[1] applies to Right channel */
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
-#ifdef SUPPORT_MC
void LVC_MixSoft_1St_MC_float_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst, /* dst can be equal to src */
LVM_INT16 NrFrames,
LVM_INT32 NrChannels,
LVM_INT32 ChMask);
-#endif
void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst, /* dst can be equal to src */
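With the SUPPORT_MC guards removed, the multichannel mixer entry points declared above are compiled into every build. A minimal caller sketch, relying only on the LVC_MixSoft_Mc_D16C31_SAT prototype shown in this header; the mixer instance is assumed to have been configured elsewhere (e.g. by the effect bundle):

    #include "LVC_Mixer.h"

    /* Apply the configured soft-ramp gain to one interleaved multichannel block. */
    /* `mixer` is assumed to be an already-initialised LVMixer3_1St_FLOAT_st.     */
    static void ApplyMcGain(LVMixer3_1St_FLOAT_st *mixer,
                            const LVM_FLOAT *in, LVM_FLOAT *out,
                            LVM_INT16 frames, LVM_INT16 channels)
    {
        /* One call covers any channel count; no SUPPORT_MC build flag is needed. */
        LVC_MixSoft_Mc_D16C31_SAT(mixer, in, out, frames, channels);
    }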
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
index 123d22b..7cba671 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
@@ -50,24 +50,20 @@
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n);
-#ifdef SUPPORT_MC
void LVC_Core_MixInSoft_Mc_D16C31_SAT(LVMixer3_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_Core_MixSoft_1St_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 n);
-#ifdef SUPPORT_MC
void LVC_Core_MixSoft_Mc_D16C31_WRA(LVMixer3_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_Core_MixHard_2St_D16C31_SAT( LVMixer3_FLOAT_st *pInstance1,
LVMixer3_FLOAT_st *pInstance2,
const LVM_FLOAT *src1,
@@ -81,13 +77,11 @@
/* ptrInstance2 applies to Right channel */
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
-#ifdef SUPPORT_MC
void LVC_Core_MixSoft_1St_MC_float_WRA(Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,
@@ -100,13 +94,11 @@
/* ptrInstance2 applies to Right channel */
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
-#ifdef SUPPORT_MC
void LVC_Core_MixHard_1St_MC_float_SAT(Mix_Private_FLOAT_st **ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT16 NrChannels);
-#endif
void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,
diff --git a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.cpp b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.cpp
index 3f62f99..23b4fae 100644
--- a/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.cpp
+++ b/media/libeffects/lvm/lib/Common/src/PK_2I_D32F32C14G11_TRC_WRA_01.cpp
@@ -117,7 +117,6 @@
}
-#ifdef SUPPORT_MC
/**************************************************************************
DELAYS-
pBiquadState->pDelays[0] to
@@ -189,4 +188,3 @@
}
}
-#endif
diff --git a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
index c5ddf77..41e2bb5 100644
--- a/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
+++ b/media/libeffects/lvm/lib/Eq/lib/LVEQNB.h
@@ -86,13 +86,6 @@
/* */
/****************************************************************************************/
-/* Memory table */
-#define LVEQNB_MEMREGION_INSTANCE 0 /* Offset to the instance memory region */
-#define LVEQNB_MEMREGION_PERSISTENT_DATA 1 /* Offset to persistent data memory region */
-#define LVEQNB_MEMREGION_PERSISTENT_COEF 2 /* Offset to persistent coefficient region */
-#define LVEQNB_MEMREGION_SCRATCH 3 /* Offset to data scratch memory region */
-#define LVEQNB_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-
/* Callback events */
#define LVEQNB_EVENT_NONE 0x0000 /* Not a valid event */
#define LVEQNB_EVENT_ALGOFF 0x0001 /* EQNB has completed switch off */
@@ -122,16 +115,6 @@
LVEQNB_FILTER_DUMMY = LVM_MAXINT_32
} LVEQNB_FilterMode_en;
-/* Memory Types */
-typedef enum
-{
- LVEQNB_PERSISTENT = 0,
- LVEQNB_PERSISTENT_DATA = 1,
- LVEQNB_PERSISTENT_COEF = 2,
- LVEQNB_SCRATCH = 3,
- LVEQNB_MEMORY_MAX = LVM_MAXINT_32
-} LVEQNB_MemoryTypes_en;
-
/* Function return status */
typedef enum
{
@@ -173,9 +156,7 @@
{
LVEQNB_STEREO = 0,
LVEQNB_MONOINSTEREO = 1,
-#ifdef SUPPORT_MC
LVEQNB_MULTICHANNEL = 2,
-#endif
LVEQNB_SOURCE_MAX = LVM_MAXINT_32
} LVEQNB_SourceFormat_en;
@@ -220,21 +201,6 @@
/* */
/****************************************************************************************/
-/* Memory region definition */
-typedef struct
-{
- LVM_UINT32 Size; /* Region size in bytes */
- LVM_UINT16 Alignment; /* Region alignment in bytes */
- LVEQNB_MemoryTypes_en Type; /* Region type */
- void *pBaseAddress; /* Pointer to the region base address */
-} LVEQNB_MemoryRegion_t;
-
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVEQNB_MemoryRegion_t Region[LVEQNB_NR_MEMORY_REGIONS]; /* One definition for each region */
-} LVEQNB_MemTab_t;
-
/* Equaliser band definition */
typedef struct
{
@@ -254,9 +220,7 @@
/* Equaliser parameters */
LVM_UINT16 NBands; /* Number of bands */
LVEQNB_BandDef_t *pBandDefinition; /* Pointer to equaliser definitions */
-#ifdef SUPPORT_MC
LVM_INT16 NrChannels;
-#endif
} LVEQNB_Params_t;
/* Capability structure */
@@ -283,78 +247,44 @@
/****************************************************************************************/
/* */
-/* FUNCTION: LVEQNB_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the default capabilities */
-/* */
-/* RETURNS: */
-/* LVEQNB_SUCCESS Succeeded */
-/* LVEQNB_NULLADDRESS When any of pMemoryTable and pCapabilities is NULL address */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVEQNB_Process function */
-/* */
-/****************************************************************************************/
-
-LVEQNB_ReturnStatus_en LVEQNB_Memory(LVEQNB_Handle_t hInstance,
- LVEQNB_MemTab_t *pMemoryTable,
- LVEQNB_Capabilities_t *pCapabilities);
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVEQNB_Init */
/* */
/* DESCRIPTION: */
-/* Create and initialisation function for the N-Band equalliser module */
-/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* LVEQNB_Memory before calling this function. */
+/* Creation and initialisation function for the N-Band equaliser module. */
/* */
/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
+/* phInstance Pointer to instance handle */
/* pCapabilities Pointer to the initialisation capabilities */
+/* pScratch Pointer to bundle scratch buffer */
/* */
/* RETURNS: */
/* LVEQNB_SUCCESS Initialisation succeeded */
-/* LVEQNB_NULLADDRESS When pCapabilities or pMemoryTableis or phInstance are NULL */
-/* LVEQNB_NULLADDRESS One or more of the memory regions has a NULL base address */
-/* pointer for a memory region with a non-zero size. */
-/* */
+/* LVEQNB_NULLADDRESS When pCapabilities or phInstance is NULL */
+/* LVEQNB_NULLADDRESS When allocated memory has a NULL base address */
/* */
/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVEQNB_Process function */
+/* 1. This function must not be interrupted by the LVEQNB_Process function */
/* */
/****************************************************************************************/
-
LVEQNB_ReturnStatus_en LVEQNB_Init(LVEQNB_Handle_t *phInstance,
- LVEQNB_MemTab_t *pMemoryTable,
- LVEQNB_Capabilities_t *pCapabilities);
+ LVEQNB_Capabilities_t *pCapabilities,
+ void *pScratch);
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVEQNB_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by LVEQNB_Init, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVEQNB_Process function */
+/* */
+/****************************************************************************************/
+void LVEQNB_DeInit(LVEQNB_Handle_t *phInstance);
/****************************************************************************************/
/* */
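The memory-table query is gone from this header; creation is now a single LVEQNB_Init call and teardown a single LVEQNB_DeInit call. A minimal happy-path sketch, assuming the caller (normally the effect bundle) sizes and owns the scratch buffer; scratchBytes is an illustrative parameter, not part of the API. The failure path is sketched after LVEQNB_Init.cpp further down.

    #include <stdlib.h>
    #include "LVEQNB.h"

    static int EqnbLifecycleSketch(LVEQNB_Capabilities_t *pCaps, size_t scratchBytes)
    {
        LVEQNB_Handle_t hEqnb    = LVM_NULL;
        void           *pScratch = malloc(scratchBytes);   /* owned by the caller, not by LVEQNB */

        if (pScratch != NULL &&
            LVEQNB_Init(&hEqnb, pCaps, pScratch) == LVEQNB_SUCCESS) {
            /* ... LVEQNB_Process() calls ... */
            LVEQNB_DeInit(&hEqnb);      /* frees the Init allocations and NULLs the handle */
        }
        free(pScratch);                 /* DeInit never frees the caller's scratch */
        return 0;
    }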
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
index 271a914..932af71 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
@@ -21,6 +21,7 @@
/* */
/****************************************************************************************/
+#include <stdlib.h>
#include "LVEQNB.h"
#include "LVEQNB_Private.h"
#include "InstAlloc.h"
@@ -28,255 +29,75 @@
/****************************************************************************************/
/* */
-/* FUNCTION: LVEQNB_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the instance capabilities */
-/* */
-/* RETURNS: */
-/* LVEQNB_SUCCESS Succeeded */
-/* LVEQNB_NULLADDRESS When any of pMemoryTable and pCapabilities is NULL address */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVEQNB_Process function */
-/* */
-/****************************************************************************************/
-
-LVEQNB_ReturnStatus_en LVEQNB_Memory(LVEQNB_Handle_t hInstance,
- LVEQNB_MemTab_t *pMemoryTable,
- LVEQNB_Capabilities_t *pCapabilities)
-{
-
- INST_ALLOC AllocMem;
- LVEQNB_Instance_t *pInstance = (LVEQNB_Instance_t *)hInstance;
-
- if((pMemoryTable == LVM_NULL)|| (pCapabilities == LVM_NULL))
- {
- return LVEQNB_NULLADDRESS;
- }
-
- /*
- * Fill in the memory table
- */
- if (hInstance == LVM_NULL)
- {
- /*
- * Instance memory
- */
- InstAlloc_Init(&AllocMem,
- LVM_NULL);
- InstAlloc_AddMember(&AllocMem, /* Low pass filter */
- sizeof(LVEQNB_Instance_t));
- pMemoryTable->Region[LVEQNB_MEMREGION_INSTANCE].Size = InstAlloc_GetTotal(&AllocMem);
- pMemoryTable->Region[LVEQNB_MEMREGION_INSTANCE].Alignment = LVEQNB_INSTANCE_ALIGN;
- pMemoryTable->Region[LVEQNB_MEMREGION_INSTANCE].Type = LVEQNB_PERSISTENT;
- pMemoryTable->Region[LVEQNB_MEMREGION_INSTANCE].pBaseAddress = LVM_NULL;
-
- /*
- * Persistant data memory
- */
- InstAlloc_Init(&AllocMem,
- LVM_NULL);
- InstAlloc_AddMember(&AllocMem, /* Low pass filter */
- sizeof(Biquad_2I_Order2_FLOAT_Taps_t));
- InstAlloc_AddMember(&AllocMem, /* High pass filter */
- sizeof(Biquad_2I_Order2_FLOAT_Taps_t));
- /* Equaliser Biquad Taps */
- InstAlloc_AddMember(&AllocMem,
- (pCapabilities->MaxBands * sizeof(Biquad_2I_Order2_FLOAT_Taps_t)));
- /* Filter definitions */
- InstAlloc_AddMember(&AllocMem,
- (pCapabilities->MaxBands * sizeof(LVEQNB_BandDef_t)));
- /* Biquad types */
- InstAlloc_AddMember(&AllocMem,
- (pCapabilities->MaxBands * sizeof(LVEQNB_BiquadType_en)));
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_DATA].Size = InstAlloc_GetTotal(&AllocMem);
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_DATA].Alignment = LVEQNB_DATA_ALIGN;
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_DATA].Type = LVEQNB_PERSISTENT_DATA;
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_DATA].pBaseAddress = LVM_NULL;
-
- /*
- * Persistant coefficient memory
- */
- InstAlloc_Init(&AllocMem,
- LVM_NULL);
- InstAlloc_AddMember(&AllocMem, /* Low pass filter */
- sizeof(Biquad_FLOAT_Instance_t));
- InstAlloc_AddMember(&AllocMem, /* High pass filter */
- sizeof(Biquad_FLOAT_Instance_t));
- /* Equaliser Biquad Instance */
- InstAlloc_AddMember(&AllocMem,
- pCapabilities->MaxBands * sizeof(Biquad_FLOAT_Instance_t));
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_COEF].Size = InstAlloc_GetTotal(&AllocMem);
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_COEF].Alignment = LVEQNB_COEF_ALIGN;
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_COEF].Type = LVEQNB_PERSISTENT_COEF;
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_COEF].pBaseAddress = LVM_NULL;
-
- /*
- * Scratch memory
- */
- InstAlloc_Init(&AllocMem,
- LVM_NULL);
- InstAlloc_AddMember(&AllocMem, /* Low pass filter */
- LVEQNB_SCRATCHBUFFERS * sizeof(LVM_FLOAT) * \
- pCapabilities->MaxBlockSize);
- pMemoryTable->Region[LVEQNB_MEMREGION_SCRATCH].Size = InstAlloc_GetTotal(&AllocMem);
- pMemoryTable->Region[LVEQNB_MEMREGION_SCRATCH].Alignment = LVEQNB_SCRATCH_ALIGN;
- pMemoryTable->Region[LVEQNB_MEMREGION_SCRATCH].Type = LVEQNB_SCRATCH;
- pMemoryTable->Region[LVEQNB_MEMREGION_SCRATCH].pBaseAddress = LVM_NULL;
- }
- else
- {
- /* Read back memory allocation table */
- *pMemoryTable = pInstance->MemoryTable;
- }
-
- return(LVEQNB_SUCCESS);
-}
-
-/****************************************************************************************/
-/* */
/* FUNCTION: LVEQNB_Init */
/* */
/* DESCRIPTION: */
-/* Create and initialisation function for the N-Band equaliser module */
-/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* DBE_Memory before calling this function. */
+/* Creation and initialisation function for the N-Band equaliser module. */
/* */
/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
-/* pCapabilities Pointer to the instance capabilities */
+/* phInstance Pointer to instance handle */
+/* pCapabilities Pointer to the initialisation capabilities */
+/* pScratch Pointer to bundle scratch buffer */
/* */
/* RETURNS: */
/* LVEQNB_SUCCESS Initialisation succeeded */
-/* LVEQNB_NULLADDRESS When pCapabilities or pMemoryTableis or phInstance are NULL */
-/* LVEQNB_NULLADDRESS One or more of the memory regions has a NULL base address */
-/* pointer for a memory region with a non-zero size. */
+/* LVEQNB_NULLADDRESS One or more memory allocations returned a NULL pointer - malloc failure */
/* */
/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVEQNB_Process function */
+/* 1. This function must not be interrupted by the LVEQNB_Process function */
/* */
/****************************************************************************************/
LVEQNB_ReturnStatus_en LVEQNB_Init(LVEQNB_Handle_t *phInstance,
- LVEQNB_MemTab_t *pMemoryTable,
- LVEQNB_Capabilities_t *pCapabilities)
+ LVEQNB_Capabilities_t *pCapabilities,
+ void *pScratch)
{
LVEQNB_Instance_t *pInstance;
- LVM_UINT32 MemSize;
- INST_ALLOC AllocMem;
- LVM_INT32 i;
- /*
- * Check for NULL pointers
- */
- if((phInstance == LVM_NULL) || (pMemoryTable == LVM_NULL) || (pCapabilities == LVM_NULL))
+ *phInstance = calloc(1, sizeof(*pInstance));
+ if (*phInstance == LVM_NULL)
+ {
+ return LVEQNB_NULLADDRESS;
+ }
+ pInstance = (LVEQNB_Instance_t *)*phInstance;
+
+ pInstance->Capabilities = *pCapabilities;
+ pInstance->pScratch = pScratch;
+
+ /* Equaliser Biquad Instance */
+ LVM_UINT32 MemSize = pCapabilities->MaxBands * sizeof(*(pInstance->pEQNB_FilterState_Float));
+ pInstance->pEQNB_FilterState_Float = (Biquad_FLOAT_Instance_t *)calloc(1, MemSize);
+ if (pInstance->pEQNB_FilterState_Float == LVM_NULL)
{
return LVEQNB_NULLADDRESS;
}
- /*
- * Check the memory table for NULL pointers
- */
- for (i = 0; i < LVEQNB_NR_MEMORY_REGIONS; i++)
+ MemSize = (pCapabilities->MaxBands * sizeof(*(pInstance->pEQNB_Taps_Float)));
+ pInstance->pEQNB_Taps_Float = (Biquad_2I_Order2_FLOAT_Taps_t *)calloc(1, MemSize);
+ if (pInstance->pEQNB_Taps_Float == LVM_NULL)
{
- if (pMemoryTable->Region[i].Size!=0)
- {
- if (pMemoryTable->Region[i].pBaseAddress==LVM_NULL)
- {
- return(LVEQNB_NULLADDRESS);
- }
- }
+ return LVEQNB_NULLADDRESS;
}
- /*
- * Set the instance handle if not already initialised
- */
-
- InstAlloc_Init(&AllocMem, pMemoryTable->Region[LVEQNB_MEMREGION_INSTANCE].pBaseAddress);
-
- if (*phInstance == LVM_NULL)
+ MemSize = (pCapabilities->MaxBands * sizeof(*(pInstance->pBandDefinitions)));
+ pInstance->pBandDefinitions = (LVEQNB_BandDef_t *)calloc(1, MemSize);
+ if (pInstance->pBandDefinitions == LVM_NULL)
{
- *phInstance = InstAlloc_AddMember(&AllocMem, sizeof(LVEQNB_Instance_t));
+ return LVEQNB_NULLADDRESS;
}
- pInstance =(LVEQNB_Instance_t *)*phInstance;
-
- /*
- * Save the memory table in the instance structure
- */
- pInstance->Capabilities = *pCapabilities;
-
- /*
- * Save the memory table in the instance structure and
- * set the structure pointers
- */
- pInstance->MemoryTable = *pMemoryTable;
-
- /*
- * Allocate coefficient memory
- */
- InstAlloc_Init(&AllocMem,
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_COEF].pBaseAddress);
-
- /* Equaliser Biquad Instance */
- pInstance->pEQNB_FilterState_Float = (Biquad_FLOAT_Instance_t *)
- InstAlloc_AddMember(&AllocMem, pCapabilities->MaxBands * \
- sizeof(Biquad_FLOAT_Instance_t));
-
- /*
- * Allocate data memory
- */
- InstAlloc_Init(&AllocMem,
- pMemoryTable->Region[LVEQNB_MEMREGION_PERSISTENT_DATA].pBaseAddress);
-
- MemSize = (pCapabilities->MaxBands * sizeof(Biquad_2I_Order2_FLOAT_Taps_t));
- pInstance->pEQNB_Taps_Float = (Biquad_2I_Order2_FLOAT_Taps_t *)InstAlloc_AddMember(&AllocMem,
- MemSize);
- MemSize = (pCapabilities->MaxBands * sizeof(LVEQNB_BandDef_t));
- pInstance->pBandDefinitions = (LVEQNB_BandDef_t *)InstAlloc_AddMember(&AllocMem,
- MemSize);
// clear all the bands, setting their gain to 0, otherwise when applying new params,
// it will compare against uninitialized values
memset(pInstance->pBandDefinitions, 0, MemSize);
- MemSize = (pCapabilities->MaxBands * sizeof(LVEQNB_BiquadType_en));
- pInstance->pBiquadType = (LVEQNB_BiquadType_en *)InstAlloc_AddMember(&AllocMem,
- MemSize);
- /*
- * Internally map, structure and allign scratch memory
- */
- InstAlloc_Init(&AllocMem,
- pMemoryTable->Region[LVEQNB_MEMREGION_SCRATCH].pBaseAddress);
+ MemSize = (pCapabilities->MaxBands * sizeof(*(pInstance->pBiquadType)));
+ pInstance->pBiquadType = (LVEQNB_BiquadType_en *)calloc(1, MemSize);
+ if (pInstance->pBiquadType == LVM_NULL)
+ {
+ return LVEQNB_NULLADDRESS;
+ }
- pInstance->pFastTemporary = (LVM_FLOAT *)InstAlloc_AddMember(&AllocMem,
- sizeof(LVM_FLOAT));
+ pInstance->pFastTemporary = (LVM_FLOAT *)pScratch;
/*
* Update the instance parameters
@@ -319,4 +140,48 @@
return(LVEQNB_SUCCESS);
}
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVEQNB_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by LVEQNB_Init, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVEQNB_Process function */
+/* */
+/****************************************************************************************/
+
+void LVEQNB_DeInit(LVEQNB_Handle_t *phInstance)
+{
+
+ LVEQNB_Instance_t *pInstance;
+ if (phInstance == LVM_NULL) {
+ return;
+ }
+ pInstance = (LVEQNB_Instance_t *)*phInstance;
+ if (pInstance == LVM_NULL) {
+ return;
+ }
+
+ /* Equaliser Biquad Instance */
+ if (pInstance->pEQNB_FilterState_Float != LVM_NULL) {
+ free(pInstance->pEQNB_FilterState_Float);
+ pInstance->pEQNB_FilterState_Float = LVM_NULL;
+ }
+ if (pInstance->pEQNB_Taps_Float != LVM_NULL) {
+ free(pInstance->pEQNB_Taps_Float);
+ pInstance->pEQNB_Taps_Float = LVM_NULL;
+ }
+ if (pInstance->pBandDefinitions != LVM_NULL) {
+ free(pInstance->pBandDefinitions);
+ pInstance->pBandDefinitions = LVM_NULL;
+ }
+ if (pInstance->pBiquadType != LVM_NULL) {
+ free(pInstance->pBiquadType);
+ pInstance->pBiquadType = LVM_NULL;
+ }
+ free(pInstance);
+ *phInstance = LVM_NULL;
+}
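Because the instance structure is calloc'd, every member pointer starts out as LVM_NULL, so LVEQNB_DeInit can reclaim an instance whose Init failed partway through its allocations. Continuing the sketch shown after LVEQNB.h above, a hedged caller-side error path (the bundle's real cleanup code may differ):

    LVEQNB_ReturnStatus_en st = LVEQNB_Init(&hEqnb, pCaps, pScratch);
    if (st != LVEQNB_SUCCESS) {
        if (hEqnb != LVM_NULL) {
            LVEQNB_DeInit(&hEqnb);  /* frees whichever members were already allocated */
        }
        free(pScratch);             /* the scratch buffer is always the caller's to release */
    }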
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
index 40facfb..9569d85 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Private.h
@@ -36,19 +36,6 @@
/* General */
#define LVEQNB_INVALID 0xFFFF /* Invalid init parameter */
-
-/* Memory */
-#define LVEQNB_INSTANCE_ALIGN 4 /* 32-bit alignment for instance structures */
-#define LVEQNB_DATA_ALIGN 4 /* 32-bit alignment for structures */
-#define LVEQNB_COEF_ALIGN 4 /* 32-bit alignment for long words */
-#ifdef SUPPORT_MC
-/* Number of buffers required for inplace processing */
-#define LVEQNB_SCRATCHBUFFERS (LVM_MAX_CHANNELS * 2)
-#else
-#define LVEQNB_SCRATCHBUFFERS 4 /* Number of buffers required for inplace processing */
-#endif
-#define LVEQNB_SCRATCH_ALIGN 4 /* 32-bit alignment for long data */
-
#define LVEQNB_BYPASS_MIXER_TC 100 /* Bypass Mixer TC */
/****************************************************************************************/
@@ -77,7 +64,7 @@
typedef struct
{
/* Public parameters */
- LVEQNB_MemTab_t MemoryTable; /* Instance memory allocation table */
+ void *pScratch; /* Pointer to bundle scratch buffer */
LVEQNB_Params_t Params; /* Instance parameters */
LVEQNB_Capabilities_t Capabilities; /* Instance capabilities */
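With LVEQNB_SCRATCHBUFFERS gone from the private header, whoever allocates pScratch has to know the working-buffer requirement itself. A hedged sizing sketch that mirrors the formula from the deleted LVEQNB_Memory code above; it assumes the bundle keeps the same scratch layout and that LVM_MAX_CHANNELS remains available from the common LVM headers:

    /* Assumed bundle-side sizing: (LVM_MAX_CHANNELS * 2) in-place working   */
    /* buffers of MaxBlockSize floats, as in the removed LVEQNB_Memory math. */
    static size_t EqnbScratchBytes(const LVEQNB_Capabilities_t *pCaps)
    {
        return (size_t)(LVM_MAX_CHANNELS * 2) * sizeof(LVM_FLOAT) * pCaps->MaxBlockSize;
    }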
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
index 65eff53..8dd5587 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
@@ -65,13 +65,9 @@
{ // updated to use samples = frames * channels.
LVEQNB_Instance_t *pInstance = (LVEQNB_Instance_t *)hInstance;
-#ifdef SUPPORT_MC
// Mono passed in as stereo
const LVM_INT32 NrChannels = pInstance->Params.NrChannels == 1
? 2 : pInstance->Params.NrChannels;
-#else
- const LVM_INT32 NrChannels = 2; // FCC_2
-#endif
const LVM_INT32 NrSamples = NrChannels * NrFrames;
/* Check for NULL pointers */
@@ -129,18 +125,11 @@
{
case LVEQNB_SinglePrecision_Float:
{
-#ifdef SUPPORT_MC
PK_Mc_D32F32C14G11_TRC_WRA_01(pBiquad,
pScratch,
pScratch,
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- PK_2I_D32F32C14G11_TRC_WRA_01(pBiquad,
- pScratch,
- pScratch,
- (LVM_INT16)NrFrames);
-#endif
break;
}
default:
@@ -151,20 +140,12 @@
}
if(pInstance->bInOperatingModeTransition == LVM_TRUE){
-#ifdef SUPPORT_MC
LVC_MixSoft_2Mc_D16C31_SAT(&pInstance->BypassMixer,
pScratch,
pInData,
pScratch,
(LVM_INT16)NrFrames,
(LVM_INT16)NrChannels);
-#else
- LVC_MixSoft_2St_D16C31_SAT(&pInstance->BypassMixer,
- pScratch,
- pInData,
- pScratch,
- (LVM_INT16)NrSamples);
-#endif
// duplicate with else clause(s)
Copy_Float(pScratch, /* Source */
pOutData, /* Destination */
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.cpp b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.cpp
index 2a75559..7a68c21 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.cpp
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_SetControlParameters.cpp
@@ -69,14 +69,10 @@
&& (pNewParams->SampleRate != LVM_FS_88200) && (pNewParams->SampleRate != LVM_FS_96000)
&& (pNewParams->SampleRate != LVM_FS_176400) && (pNewParams->SampleRate != LVM_FS_192000)
)
-#ifdef SUPPORT_MC
|| ((pNewParams->SourceFormat != LVM_STEREO) &&
(pNewParams->SourceFormat != LVM_MONOINSTEREO) &&
(pNewParams->SourceFormat != LVM_MONO) &&
(pNewParams->SourceFormat != LVM_MULTICHANNEL)))
-#else
- || ((pNewParams->SourceFormat != LVM_STEREO) && (pNewParams->SourceFormat != LVM_MONOINSTEREO) && (pNewParams->SourceFormat != LVM_MONO)) )
-#endif
{
return (LVREV_OUTOFRANGE);
}
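The reverb's parameter validation now accepts multichannel sources in every build. A hedged caller sketch, where hRev and params are an existing handle and an LVREV_ControlParams_t that already passes the remaining range checks, and the LVREV_SetControlParameters prototype with its LVREV_SUCCESS/LVREV_OUTOFRANGE return codes is assumed unchanged:

    /* Only the field involved in the check above is touched here. */
    params.SourceFormat = LVM_MULTICHANNEL;    /* no SUPPORT_MC build flag required any more */
    if (LVREV_SetControlParameters(hRev, &params) != LVREV_SUCCESS) {
        /* out-of-range combinations are still rejected with LVREV_OUTOFRANGE */
    }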
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/lib/LVPSA.h b/media/libeffects/lvm/lib/SpectrumAnalyzer/lib/LVPSA.h
index c9fa7ad..0ba662a 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/lib/LVPSA.h
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/lib/LVPSA.h
@@ -22,28 +22,9 @@
/****************************************************************************************/
/* */
-/* CONSTANTS DEFINITIONS */
-/* */
-/****************************************************************************************/
-
-/* Memory table*/
-#define LVPSA_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-
-/****************************************************************************************/
-/* */
/* TYPES DEFINITIONS */
/* */
/****************************************************************************************/
-/* Memory Types */
-typedef enum
-{
- LVPSA_PERSISTENT = LVM_PERSISTENT,
- LVPSA_PERSISTENT_DATA = LVM_PERSISTENT_DATA,
- LVPSA_PERSISTENT_COEF = LVM_PERSISTENT_COEF,
- LVPSA_SCRATCH = LVM_SCRATCH,
- LVPSA_MEMORY_DUMMY = LVM_MAXINT_32 /* Force 32 bits enum, don't use it! */
-} LVPSA_MemoryTypes_en;
-
/* Level detection speed control parameters */
typedef enum
{
@@ -80,20 +61,6 @@
} LVPSA_ControlParams_t, *pLVPSA_ControlParams_t;
-/* Memory region definition */
-typedef struct
-{
- LVM_UINT32 Size; /* Region size in bytes */
- LVPSA_MemoryTypes_en Type; /* Region type */
- void *pBaseAddress; /* Pointer to the region base address */
-} LVPSA_MemoryRegion_t;
-
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVPSA_MemoryRegion_t Region[LVPSA_NR_MEMORY_REGIONS];/* One definition for each region */
-} LVPSA_MemTab_t;
-
/* Audio time type */
typedef LVM_INT32 LVPSA_Time;
@@ -113,62 +80,43 @@
/*********************************************************************************************************************************
FUNCTIONS PROTOTYPE
**********************************************************************************************************************************/
-/*********************************************************************************************************************************/
-/* */
-/* FUNCTION: LVPSA_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pInitParams Pointer to the instance init parameters */
-/* */
-/* RETURNS: */
-/* LVPSA_OK Succeeds */
-/* otherwise Error due to bad parameters */
-/* */
-/*********************************************************************************************************************************/
-LVPSA_RETURN LVPSA_Memory ( pLVPSA_Handle_t hInstance,
- LVPSA_MemTab_t *pMemoryTable,
- LVPSA_InitParams_t *pInitParams );
+/************************************************************************************/
+/* */
+/* FUNCTION: LVPSA_Init */
+/* */
+/* DESCRIPTION: */
+/* Create and initialise the LVPSA module, including the instance handle */
+/* */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to the instance handle */
+/* InitParams Init parameters structure */
+/* ControlParams Control parameters structure */
+/* pScratch Pointer to bundle scratch memory area */
+/* */
+/* */
+/* RETURNS: */
+/* LVPSA_OK Succeeds */
+/* otherwise Error due to bad parameters */
+/* */
+/************************************************************************************/
+LVPSA_RETURN LVPSA_Init(pLVPSA_Handle_t *phInstance,
+ LVPSA_InitParams_t *pInitParams,
+ LVPSA_ControlParams_t *pControlParams,
+ void *pScratch);
-/*********************************************************************************************************************************/
-/* */
-/* FUNCTION: LVPSA_Init */
-/* */
-/* DESCRIPTION: */
-/* Initializes the LVPSA module. */
-/* */
-/* */
-/* PARAMETERS: */
-/* phInstance Pointer to the instance Handle */
-/* pInitParams Pointer to the instance init parameters */
-/* pControlParams Pointer to the instance control parameters */
-/* pMemoryTable Pointer to the memory definition table */
-/* */
-/* */
-/* RETURNS: */
-/* LVPSA_OK Succeeds */
-/* otherwise Error due to bad parameters */
-/* */
-/*********************************************************************************************************************************/
-LVPSA_RETURN LVPSA_Init ( pLVPSA_Handle_t *phInstance,
- LVPSA_InitParams_t *pInitParams,
- LVPSA_ControlParams_t *pControlParams,
- LVPSA_MemTab_t *pMemoryTable );
+/************************************************************************************/
+/* */
+/* FUNCTION: LVPSA_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by the LVPSA_Init call, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to the instance handle */
+/* */
+/************************************************************************************/
+void LVPSA_DeInit(pLVPSA_Handle_t *phInstance);
/*********************************************************************************************************************************/
/* */
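The analyzer follows the same pattern as the equaliser: one create call that allocates internally and one destroy call that releases everything. A hedged sketch relying only on the two prototypes above; the init/control parameter contents and the scratch size are placeholders supplied by the caller:

    #include <stdlib.h>
    #include "LVPSA.h"

    static int PsaLifecycleSketch(LVPSA_InitParams_t *pInitParams,
                                  LVPSA_ControlParams_t *pControlParams,
                                  size_t scratchBytes)
    {
        pLVPSA_Handle_t hPsa     = LVM_NULL;
        void           *pScratch = malloc(scratchBytes);   /* caller-owned working buffer */

        if (pScratch != NULL) {
            if (LVPSA_Init(&hPsa, pInitParams, pControlParams, pScratch) == LVPSA_OK) {
                /* ... LVPSA_Process() calls ... */
            }
            if (hPsa != LVM_NULL) {
                LVPSA_DeInit(&hPsa);    /* safe on fully or partially built instances */
            }
            free(pScratch);
        }
        return 0;
    }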
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
index 9fcd82f..be3c68f 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+#include <stdlib.h>
#include "LVPSA.h"
#include "LVPSA_Private.h"
#include "InstAlloc.h"
@@ -24,14 +25,14 @@
/* FUNCTION: LVPSA_Init */
/* */
/* DESCRIPTION: */
-/* Initialize the LVPSA module */
+/* Create and initialise the LVPSA module, including the instance handle */
/* */
/* */
/* PARAMETERS: */
-/* phInstance Pointer to pointer to the instance */
+/* phInstance Pointer to the instance handle */
/* InitParams Init parameters structure */
/* ControlParams Control parameters structure */
-/* pMemoryTable Memory table that contains memory areas definition */
+/* pScratch Pointer to bundle scratch memory area */
/* */
/* */
/* RETURNS: */
@@ -39,10 +40,10 @@
/* otherwise Error due to bad parameters */
/* */
/************************************************************************************/
-LVPSA_RETURN LVPSA_Init ( pLVPSA_Handle_t *phInstance,
- LVPSA_InitParams_t *pInitParams,
- LVPSA_ControlParams_t *pControlParams,
- LVPSA_MemTab_t *pMemoryTable )
+LVPSA_RETURN LVPSA_Init(pLVPSA_Handle_t *phInstance,
+ LVPSA_InitParams_t *pInitParams,
+ LVPSA_ControlParams_t *pControlParams,
+ void *pScratch)
{
LVPSA_InstancePr_t *pLVPSA_Inst;
LVPSA_RETURN errorCode = LVPSA_OK;
@@ -50,64 +51,15 @@
extern LVM_FLOAT LVPSA_Float_GainTable[];
LVM_UINT32 BufferLength = 0;
- /* Ints_Alloc instances, needed for memory alignment management */
- INST_ALLOC Instance;
- INST_ALLOC Scratch;
- INST_ALLOC Data;
- INST_ALLOC Coef;
-
- /* Check parameters */
- if((phInstance == LVM_NULL) || (pInitParams == LVM_NULL) || (pControlParams == LVM_NULL) || (pMemoryTable == LVM_NULL))
- {
- return(LVPSA_ERROR_NULLADDRESS);
- }
- if( (pInitParams->SpectralDataBufferDuration > LVPSA_MAXBUFFERDURATION) ||
- (pInitParams->SpectralDataBufferDuration == 0) ||
- (pInitParams->MaxInputBlockSize > LVPSA_MAXINPUTBLOCKSIZE) ||
- (pInitParams->MaxInputBlockSize == 0) ||
- (pInitParams->nBands < LVPSA_NBANDSMIN) ||
- (pInitParams->nBands > LVPSA_NBANDSMAX) ||
- (pInitParams->pFiltersParams == 0))
- {
- return(LVPSA_ERROR_INVALIDPARAM);
- }
- for(ii = 0; ii < pInitParams->nBands; ii++)
- {
- if((pInitParams->pFiltersParams[ii].CenterFrequency > LVPSA_MAXCENTERFREQ) ||
- (pInitParams->pFiltersParams[ii].PostGain > LVPSA_MAXPOSTGAIN) ||
- (pInitParams->pFiltersParams[ii].PostGain < LVPSA_MINPOSTGAIN) ||
- (pInitParams->pFiltersParams[ii].QFactor < LVPSA_MINQFACTOR) ||
- (pInitParams->pFiltersParams[ii].QFactor > LVPSA_MAXQFACTOR))
- {
- return(LVPSA_ERROR_INVALIDPARAM);
- }
- }
-
- /*Inst_Alloc instances initialization */
- InstAlloc_Init( &Instance , pMemoryTable->Region[LVPSA_MEMREGION_INSTANCE].pBaseAddress);
- InstAlloc_Init( &Scratch , pMemoryTable->Region[LVPSA_MEMREGION_SCRATCH].pBaseAddress);
- InstAlloc_Init( &Data , pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_DATA].pBaseAddress);
- InstAlloc_Init( &Coef , pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_COEF].pBaseAddress);
-
/* Set the instance handle if not already initialised */
+ *phInstance = calloc(1, sizeof(*pLVPSA_Inst));
if (*phInstance == LVM_NULL)
{
- *phInstance = InstAlloc_AddMember( &Instance, sizeof(LVPSA_InstancePr_t) );
+ return LVPSA_ERROR_NULLADDRESS;
}
pLVPSA_Inst =(LVPSA_InstancePr_t*)*phInstance;
- /* Check the memory table for NULL pointers */
- for (ii = 0; ii < LVPSA_NR_MEMORY_REGIONS; ii++)
- {
- if (pMemoryTable->Region[ii].Size!=0)
- {
- if (pMemoryTable->Region[ii].pBaseAddress==LVM_NULL)
- {
- return(LVPSA_ERROR_NULLADDRESS);
- }
- pLVPSA_Inst->MemoryTable.Region[ii] = pMemoryTable->Region[ii];
- }
- }
+ pLVPSA_Inst->pScratch = pScratch;
/* Initialize module's internal parameters */
pLVPSA_Inst->bControlPending = LVM_FALSE;
@@ -137,31 +89,61 @@
}
/* Assign the pointers */
- pLVPSA_Inst->pPostGains =
- (LVM_FLOAT *)InstAlloc_AddMember(&Instance, pInitParams->nBands * sizeof(LVM_FLOAT));
- pLVPSA_Inst->pFiltersParams = (LVPSA_FilterParam_t *)
- InstAlloc_AddMember(&Instance, pInitParams->nBands * sizeof(LVPSA_FilterParam_t));
- pLVPSA_Inst->pSpectralDataBufferStart = (LVM_UINT8 *)
- InstAlloc_AddMember(&Instance, pInitParams->nBands * \
- pLVPSA_Inst->SpectralDataBufferLength * sizeof(LVM_UINT8));
- pLVPSA_Inst->pPreviousPeaks = (LVM_UINT8 *)
- InstAlloc_AddMember(&Instance, pInitParams->nBands * sizeof(LVM_UINT8));
- pLVPSA_Inst->pBPFiltersPrecision = (LVPSA_BPFilterPrecision_en *)
- InstAlloc_AddMember(&Instance, pInitParams->nBands * \
- sizeof(LVPSA_BPFilterPrecision_en));
- pLVPSA_Inst->pBP_Instances = (Biquad_FLOAT_Instance_t *)
- InstAlloc_AddMember(&Coef, pInitParams->nBands * \
- sizeof(Biquad_FLOAT_Instance_t));
- pLVPSA_Inst->pQPD_States = (QPD_FLOAT_State_t *)
- InstAlloc_AddMember(&Coef, pInitParams->nBands * \
- sizeof(QPD_FLOAT_State_t));
-
- pLVPSA_Inst->pBP_Taps = (Biquad_1I_Order2_FLOAT_Taps_t *)
- InstAlloc_AddMember(&Data, pInitParams->nBands * \
- sizeof(Biquad_1I_Order2_FLOAT_Taps_t));
- pLVPSA_Inst->pQPD_Taps = (QPD_FLOAT_Taps_t *)
- InstAlloc_AddMember(&Data, pInitParams->nBands * \
- sizeof(QPD_FLOAT_Taps_t));
+ pLVPSA_Inst->pPostGains = (LVM_FLOAT *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pPostGains)));
+ if (pLVPSA_Inst->pPostGains == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pFiltersParams = (LVPSA_FilterParam_t *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pFiltersParams)));
+ if (pLVPSA_Inst->pFiltersParams == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pSpectralDataBufferStart = (LVM_UINT8 *)
+ calloc(pInitParams->nBands, pLVPSA_Inst->SpectralDataBufferLength * \
+ sizeof(*(pLVPSA_Inst->pSpectralDataBufferStart)));
+ if (pLVPSA_Inst->pSpectralDataBufferStart == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pPreviousPeaks = (LVM_UINT8 *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pPreviousPeaks)));
+ if (pLVPSA_Inst->pPreviousPeaks == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pBPFiltersPrecision = (LVPSA_BPFilterPrecision_en *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pBPFiltersPrecision)));
+ if (pLVPSA_Inst->pBPFiltersPrecision == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pBP_Instances = (Biquad_FLOAT_Instance_t *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pBP_Instances)));
+ if (pLVPSA_Inst->pBP_Instances == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pQPD_States = (QPD_FLOAT_State_t *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pQPD_States)));
+ if (pLVPSA_Inst->pQPD_States == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pBP_Taps = (Biquad_1I_Order2_FLOAT_Taps_t *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pBP_Taps)));
+ if (pLVPSA_Inst->pBP_Taps == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
+ pLVPSA_Inst->pQPD_Taps = (QPD_FLOAT_Taps_t *)
+ calloc(pInitParams->nBands, sizeof(*(pLVPSA_Inst->pQPD_Taps)));
+ if (pLVPSA_Inst->pQPD_Taps == LVM_NULL)
+ {
+ return LVPSA_ERROR_NULLADDRESS;
+ }
/* Copy filters parameters in the private instance */
for(ii = 0; ii < pLVPSA_Inst->nBands; ii++)
@@ -195,3 +177,60 @@
return(errorCode);
}
+/************************************************************************************/
+/* */
+/* FUNCTION: LVPSA_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free the memory allocated by the LVPSA_Init call, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to the instance handle */
+/* */
+/************************************************************************************/
+void LVPSA_DeInit(pLVPSA_Handle_t *phInstance)
+{
+ LVPSA_InstancePr_t *pLVPSA_Inst = (LVPSA_InstancePr_t *)*phInstance;
+ if (pLVPSA_Inst == LVM_NULL) {
+ return;
+ }
+ if (pLVPSA_Inst->pPostGains != LVM_NULL) {
+ free(pLVPSA_Inst->pPostGains);
+ pLVPSA_Inst->pPostGains = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pFiltersParams != LVM_NULL) {
+ free(pLVPSA_Inst->pFiltersParams);
+ pLVPSA_Inst->pFiltersParams = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pSpectralDataBufferStart != LVM_NULL) {
+ free(pLVPSA_Inst->pSpectralDataBufferStart);
+ pLVPSA_Inst->pSpectralDataBufferStart = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pPreviousPeaks != LVM_NULL) {
+ free(pLVPSA_Inst->pPreviousPeaks);
+ pLVPSA_Inst->pPreviousPeaks = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pBPFiltersPrecision != LVM_NULL) {
+ free(pLVPSA_Inst->pBPFiltersPrecision);
+ pLVPSA_Inst->pBPFiltersPrecision = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pBP_Instances != LVM_NULL) {
+ free(pLVPSA_Inst->pBP_Instances);
+ pLVPSA_Inst->pBP_Instances = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pQPD_States != LVM_NULL) {
+ free(pLVPSA_Inst->pQPD_States);
+ pLVPSA_Inst->pQPD_States = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pBP_Taps != LVM_NULL) {
+ free(pLVPSA_Inst->pBP_Taps);
+ pLVPSA_Inst->pBP_Taps = LVM_NULL;
+ }
+ if (pLVPSA_Inst->pQPD_Taps != LVM_NULL) {
+ free(pLVPSA_Inst->pQPD_Taps);
+ pLVPSA_Inst->pQPD_Taps = LVM_NULL;
+ }
+ free(pLVPSA_Inst);
+ *phInstance = LVM_NULL;
+}
+
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Memory.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Memory.cpp
deleted file mode 100644
index eafcbe6..0000000
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Memory.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2004-2010 NXP Software
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LVPSA.h"
-#include "LVPSA_Private.h"
-#include "InstAlloc.h"
-
-/****************************************************************************************/
-/* */
-/* FUNCTION: LVEQNB_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) the memory */
-/* base address pointers are NULL on return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the memory */
-/* table returns the allocated memory and base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* InitParams Pointer to the instance init parameters */
-/* */
-/* RETURNS: */
-/* LVPSA_OK Succeeds */
-/* otherwise Error due to bad parameters */
-/* */
-/****************************************************************************************/
-LVPSA_RETURN LVPSA_Memory ( pLVPSA_Handle_t hInstance,
- LVPSA_MemTab_t *pMemoryTable,
- LVPSA_InitParams_t *pInitParams )
-{
- LVM_UINT32 ii;
- LVM_UINT32 BufferLength;
- INST_ALLOC Instance;
- INST_ALLOC Scratch;
- INST_ALLOC Data;
- INST_ALLOC Coef;
- LVPSA_InstancePr_t *pLVPSA_Inst = (LVPSA_InstancePr_t*)hInstance;
-
- InstAlloc_Init( &Instance , LVM_NULL);
- InstAlloc_Init( &Scratch , LVM_NULL);
- InstAlloc_Init( &Data , LVM_NULL);
- InstAlloc_Init( &Coef , LVM_NULL);
-
- if((pMemoryTable == LVM_NULL) || (pInitParams == LVM_NULL))
- {
- return(LVPSA_ERROR_NULLADDRESS);
- }
-
- /*
- * Fill in the memory table
- */
- if (hInstance == LVM_NULL)
- {
-
- /* Check init parameter */
- if( (pInitParams->SpectralDataBufferDuration > LVPSA_MAXBUFFERDURATION) ||
- (pInitParams->SpectralDataBufferDuration == 0) ||
- (pInitParams->MaxInputBlockSize > LVPSA_MAXINPUTBLOCKSIZE) ||
- (pInitParams->MaxInputBlockSize == 0) ||
- (pInitParams->nBands < LVPSA_NBANDSMIN) ||
- (pInitParams->nBands > LVPSA_NBANDSMAX) ||
- (pInitParams->pFiltersParams == 0))
- {
- return(LVPSA_ERROR_INVALIDPARAM);
- }
- for(ii = 0; ii < pInitParams->nBands; ii++)
- {
- if((pInitParams->pFiltersParams[ii].CenterFrequency > LVPSA_MAXCENTERFREQ) ||
- (pInitParams->pFiltersParams[ii].PostGain > LVPSA_MAXPOSTGAIN) ||
- (pInitParams->pFiltersParams[ii].PostGain < LVPSA_MINPOSTGAIN) ||
- (pInitParams->pFiltersParams[ii].QFactor < LVPSA_MINQFACTOR) ||
- (pInitParams->pFiltersParams[ii].QFactor > LVPSA_MAXQFACTOR))
- {
- return(LVPSA_ERROR_INVALIDPARAM);
- }
- }
-
- /*
- * Instance memory
- */
-
- InstAlloc_AddMember( &Instance, sizeof(LVPSA_InstancePr_t) );
- InstAlloc_AddMember( &Instance, pInitParams->nBands * sizeof(LVM_FLOAT) );
- InstAlloc_AddMember( &Instance, pInitParams->nBands * sizeof(LVPSA_FilterParam_t) );
-
- {
- /* for avoiding QAC warnings as MUL32x32INTO32 works on LVM_INT32 only*/
- LVM_INT32 SDBD=(LVM_INT32)pInitParams->SpectralDataBufferDuration;
- LVM_INT32 IRTI=(LVM_INT32)LVPSA_InternalRefreshTimeInv;
- LVM_INT32 BL;
-
- MUL32x32INTO32(SDBD,IRTI,BL,LVPSA_InternalRefreshTimeShift)
- BufferLength=(LVM_UINT32)BL;
- }
-
- if((BufferLength * LVPSA_InternalRefreshTime) != pInitParams->SpectralDataBufferDuration)
- {
- BufferLength++;
- }
- InstAlloc_AddMember( &Instance, pInitParams->nBands * BufferLength * sizeof(LVM_UINT8) );
- InstAlloc_AddMember( &Instance, pInitParams->nBands * sizeof(LVM_UINT8) );
- InstAlloc_AddMember( &Instance, pInitParams->nBands * sizeof(LVPSA_BPFilterPrecision_en) );
- pMemoryTable->Region[LVPSA_MEMREGION_INSTANCE].Size = InstAlloc_GetTotal(&Instance);
- pMemoryTable->Region[LVPSA_MEMREGION_INSTANCE].Type = LVPSA_PERSISTENT;
- pMemoryTable->Region[LVPSA_MEMREGION_INSTANCE].pBaseAddress = LVM_NULL;
-
- /*
- * Scratch memory
- */
- InstAlloc_AddMember( &Scratch, 2 * pInitParams->MaxInputBlockSize * sizeof(LVM_FLOAT) );
- pMemoryTable->Region[LVPSA_MEMREGION_SCRATCH].Size = InstAlloc_GetTotal(&Scratch);
- pMemoryTable->Region[LVPSA_MEMREGION_SCRATCH].Type = LVPSA_SCRATCH;
- pMemoryTable->Region[LVPSA_MEMREGION_SCRATCH].pBaseAddress = LVM_NULL;
-
- /*
- * Persistent coefficients memory
- */
- InstAlloc_AddMember( &Coef, pInitParams->nBands * sizeof(Biquad_FLOAT_Instance_t) );
- InstAlloc_AddMember( &Coef, pInitParams->nBands * sizeof(QPD_FLOAT_State_t) );
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_COEF].Size = InstAlloc_GetTotal(&Coef);
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_COEF].Type = LVPSA_PERSISTENT_COEF;
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_COEF].pBaseAddress = LVM_NULL;
-
- /*
- * Persistent data memory
- */
- InstAlloc_AddMember( &Data, pInitParams->nBands * sizeof(Biquad_1I_Order2_FLOAT_Taps_t) );
- InstAlloc_AddMember( &Data, pInitParams->nBands * sizeof(QPD_FLOAT_Taps_t) );
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_DATA].Size = InstAlloc_GetTotal(&Data);
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_DATA].Type = LVPSA_PERSISTENT_DATA;
- pMemoryTable->Region[LVPSA_MEMREGION_PERSISTENT_DATA].pBaseAddress = LVM_NULL;
-
- }
- else
- {
- /* Read back memory allocation table */
- *pMemoryTable = pLVPSA_Inst->MemoryTable;
- }
-
- return(LVPSA_OK);
-}
-
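Deleting LVPSA_Memory also removes the only in-tree statement of the analyzer's scratch requirement. A hedged sketch of the sizing the caller is now assumed to perform, mirroring the removed Scratch-region computation above (two in-place working buffers of MaxInputBlockSize floats):

    /* Assumed caller-side sizing, copied from the removed LVPSA_Memory math. */
    static size_t PsaScratchBytes(const LVPSA_InitParams_t *pInitParams)
    {
        return 2u * pInitParams->MaxInputBlockSize * sizeof(LVM_FLOAT);
    }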
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
index 61987b5..fc67a75 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Private.h
@@ -27,16 +27,6 @@
CONSTANT DEFINITIONS
***********************************************************************************/
-/* Memory */
-#define LVPSA_INSTANCE_ALIGN 4 /* 32-bit alignment for structures */
-#define LVPSA_SCRATCH_ALIGN 4 /* 32-bit alignment for long data */
-#define LVPSA_COEF_ALIGN 4 /* 32-bit alignment for long words */
-#define LVPSA_DATA_ALIGN 4 /* 32-bit alignment for long data */
-
-#define LVPSA_MEMREGION_INSTANCE 0 /* Offset to instance memory region in memory table */
-#define LVPSA_MEMREGION_PERSISTENT_COEF 1 /* Offset to persistent coefficients memory region in memory table */
-#define LVPSA_MEMREGION_PERSISTENT_DATA 2 /* Offset to persistent taps memory region in memory table */
-#define LVPSA_MEMREGION_SCRATCH 3 /* Offset to scratch memory region in memory table */
#define LVPSA_NR_SUPPORTED_RATE 13 /* From 8000Hz to 192000Hz*/
#define LVPSA_NR_SUPPORTED_SPEED 3 /* LOW, MEDIUM, HIGH */
@@ -82,7 +72,8 @@
LVPSA_ControlParams_t CurrentParams; /* Current control parameters of the module */
LVPSA_ControlParams_t NewParams; /* New control parameters given by the user */
- LVPSA_MemTab_t MemoryTable;
+ void *pScratch; /* Pointer to bundle scratch buffer */
LVPSA_BPFilterPrecision_en *pBPFiltersPrecision; /* Points a nBands elements array that contains the filter precision for each band */
Biquad_FLOAT_Instance_t *pBP_Instances;
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.cpp
index 81a88c5..b4d111e 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.cpp
@@ -79,8 +79,7 @@
{
return(LVPSA_ERROR_INVALIDPARAM);
}
-
- pScratch = (LVM_FLOAT*)pLVPSA_Inst->MemoryTable.Region[LVPSA_MEMREGION_SCRATCH].pBaseAddress;
+ pScratch = (LVM_FLOAT*)pLVPSA_Inst->pScratch;
pWrite_Save = pLVPSA_Inst->pSpectralDataBufferWritePointer;
/******************************************************************************
diff --git a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
index 0adfd1b..58ba8ad 100644
--- a/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
+++ b/media/libeffects/lvm/lib/StereoWidening/lib/LVCS.h
@@ -71,13 +71,6 @@
/* */
/****************************************************************************************/
-/* Memory table */
-#define LVCS_MEMREGION_PERSISTENT_SLOW_DATA 0 /* Offset to the instance memory region */
-#define LVCS_MEMREGION_PERSISTENT_FAST_DATA 1 /* Offset to the persistent data memory region */
-#define LVCS_MEMREGION_PERSISTENT_FAST_COEF 2 /* Offset to the persistent coefficient memory region */
-#define LVCS_MEMREGION_TEMPORARY_FAST 3 /* Offset to temporary memory region */
-#define LVCS_NR_MEMORY_REGIONS 4 /* Number of memory regions */
-
/* Effect Level */
#define LVCS_EFFECT_LOW 16384 /* Effect scaling 50% */
#define LVCS_EFFECT_MEDIUM 24576 /* Effect scaling 75% */
@@ -104,24 +97,12 @@
LVCS_MAX = LVM_MAXENUM
} LVCS_Modes_en;
-/* Memory Types */
-typedef enum
-{
- LVCS_SCRATCH = 0,
- LVCS_DATA = 1,
- LVCS_COEFFICIENT = 2,
- LVCS_PERSISTENT = 3,
- LVCS_MEMORYTYPE_MAX = LVM_MAXENUM
-} LVCS_MemoryTypes_en;
-
/* Function return status */
typedef enum
{
LVCS_SUCCESS = 0, /* Successful return from a routine */
- LVCS_ALIGNMENTERROR = 1, /* Memory alignment error */
- LVCS_NULLADDRESS = 2, /* NULL allocation address */
- LVCS_TOOMANYSAMPLES = 3, /* Maximum block size exceeded */
- LVCS_INVALIDBUFFER = 4, /* Invalid buffer processing request */
+ LVCS_NULLADDRESS = 1, /* NULL allocation address */
+ LVCS_TOOMANYSAMPLES = 2, /* Maximum block size exceeded */
LVCS_STATUSMAX = LVM_MAXENUM
} LVCS_ReturnStatus_en;
@@ -166,20 +147,6 @@
/* */
/****************************************************************************************/
-/* Memory region definition */
-typedef struct
-{
- LVM_UINT32 Size; /* Region size in bytes */
- LVCS_MemoryTypes_en Type; /* Region type */
- void *pBaseAddress; /* Pointer to the region base address */
-} LVCS_MemoryRegion_t;
-
-/* Memory table containing the region definitions */
-typedef struct
-{
- LVCS_MemoryRegion_t Region[LVCS_NR_MEMORY_REGIONS]; /* One definition for each region */
-} LVCS_MemTab_t;
-
/* Concert Sound parameter structure */
typedef struct
{
@@ -190,9 +157,7 @@
LVM_Fs_en SampleRate; /* Sampling rate */
LVM_INT16 EffectLevel; /* Effect level */
LVM_UINT16 ReverbLevel; /* Reverb level in % */
-#ifdef SUPPORT_MC
LVM_INT32 NrChannels;
-#endif
} LVCS_Params_t;
/* Concert Sound Capability structure */
@@ -213,82 +178,45 @@
/* */
/****************************************************************************************/
-/****************************************************************************************/
-/* */
-/* FUNCTION: LVCS_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) it is */
-/* passed the default capabilities, of these only the buffer processing setting is */
-/* used. */
-/* */
-/* When called for memory allocation the memory base address pointers are NULL on */
-/* return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the */
-/* capabilities are ignored and the memory table returns the allocated memory and */
-/* base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the default capabilites */
-/* */
-/* RETURNS: */
-/* LVCS_Success Succeeded */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVCS_Process function */
-/* */
-/****************************************************************************************/
-
-LVCS_ReturnStatus_en LVCS_Memory(LVCS_Handle_t hInstance,
- LVCS_MemTab_t *pMemoryTable,
- LVCS_Capabilities_t *pCapabilities);
-
-/****************************************************************************************/
-/* */
-/* FUNCTION: LVCS_Init */
-/* */
-/* DESCRIPTION: */
-/* Create and initialisation function for the Concert Sound module */
-/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* LVCS_Memory before calling this function. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
-/* pCapabilities Pointer to the initialisation capabilities */
-/* */
-/* RETURNS: */
-/* LVCS_Success Initialisation succeeded */
-/* LVCS_AlignmentError Instance or scratch memory on incorrect alignment */
-/* LVCS_NullAddress Instance or scratch memory has a NULL pointer */
-/* */
-/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVCS_Process function */
-/* */
-/****************************************************************************************/
-
+/************************************************************************************/
+/* */
+/* FUNCTION: LVCS_Init */
+/* */
+/* DESCRIPTION: */
+/* Create and initialisation function for the Concert Sound module */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* pCapabilities Pointer to the capabilities structure */
+/* pScratch Pointer to the scratch buffer */
+/* */
+/* RETURNS: */
+/* LVCS_Success Initialisation succeeded */
+/* LVCS_NULLADDRESS One or more memory buffers are NULL */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVCS_Process function */
+/* */
+/************************************************************************************/
LVCS_ReturnStatus_en LVCS_Init(LVCS_Handle_t *phInstance,
- LVCS_MemTab_t *pMemoryTable,
- LVCS_Capabilities_t *pCapabilities);
+ LVCS_Capabilities_t *pCapabilities,
+ void *pScratch);
+
+/************************************************************************************/
+/* */
+/* FUNCTION: LVCS_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free memory allocated in the LVCS_Init call, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVCS_Process function */
+/* */
+/************************************************************************************/
+void LVCS_DeInit(LVCS_Handle_t *phInstance);
/****************************************************************************************/
/* */
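Note on the new LVCS.h API above: creation and teardown are now explicit and symmetric, and the caller keeps ownership of the scratch buffer it passes to LVCS_Init. The following is a minimal usage sketch, not part of the library; the helper names are illustrative and only the types and functions declared in this header are assumed.

    // Sketch of the new LVCS lifecycle (hypothetical helpers, types from LVCS.h).
    #include <stdlib.h>
    #include "LVCS.h"

    static LVCS_Handle_t createConcertSound(LVCS_Capabilities_t *pCapabilities,
                                            size_t scratchBytes) {
        LVCS_Handle_t hInstance = LVM_NULL;
        void *pScratch = calloc(1, scratchBytes);            // caller-owned scratch
        if (pScratch == LVM_NULL) return LVM_NULL;
        if (LVCS_Init(&hInstance, pCapabilities, pScratch) != LVCS_SUCCESS) {
            free(pScratch);
            return LVM_NULL;
        }
        return hInstance;
    }

    static void destroyConcertSound(LVCS_Handle_t *phInstance, void *pScratch) {
        LVCS_DeInit(phInstance);   // frees pData/pCoeff and the instance, not the scratch
        free(pScratch);            // the scratch buffer is still owned by the caller
    }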
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
index 431b7e3..abadae3 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
@@ -65,11 +65,8 @@
BQ_FLOAT_Coefs_t Coeffs;
const BiquadA012B12CoefsSP_t *pEqualiserCoefTable;
- pData = (LVCS_Data_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress;
-
- pCoefficients = (LVCS_Coefficient_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
+ pData = (LVCS_Data_t *)pInstance->pData;
+ pCoefficients = (LVCS_Coefficient_t *)pInstance->pCoeff;
/*
* If the sample rate changes re-initialise the filters
*/
@@ -144,8 +141,7 @@
LVCS_Equaliser_t *pConfig = (LVCS_Equaliser_t *)&pInstance->Equaliser;
LVCS_Coefficient_t *pCoefficients;
- pCoefficients = (LVCS_Coefficient_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
+ pCoefficients = (LVCS_Coefficient_t *)pInstance->pCoeff;
/*
* Check if the equaliser is required
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
index 630ecf7..312885c 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
@@ -20,99 +20,11 @@
/* Includes */
/* */
/************************************************************************************/
-
+#include <stdlib.h>
#include "LVCS.h"
#include "LVCS_Private.h"
#include "LVCS_Tables.h"
-/****************************************************************************************/
-/* */
-/* FUNCTION: LVCS_Memory */
-/* */
-/* DESCRIPTION: */
-/* This function is used for memory allocation and free. It can be called in */
-/* two ways: */
-/* */
-/* hInstance = NULL Returns the memory requirements */
-/* hInstance = Instance handle Returns the memory requirements and */
-/* allocated base addresses for the instance */
-/* */
-/* When this function is called for memory allocation (hInstance=NULL) it is */
-/* passed the default capabilities. */
-/* */
-/* When called for memory allocation the memory base address pointers are NULL on */
-/* return. */
-/* */
-/* When the function is called for free (hInstance = Instance Handle) the */
-/* capabilities are ignored and the memory table returns the allocated memory and */
-/* base addresses used during initialisation. */
-/* */
-/* PARAMETERS: */
-/* hInstance Instance Handle */
-/* pMemoryTable Pointer to an empty memory definition table */
-/* pCapabilities Pointer to the default capabilites */
-/* */
-/* RETURNS: */
-/* LVCS_Success Succeeded */
-/* */
-/* NOTES: */
-/* 1. This function may be interrupted by the LVCS_Process function */
-/* */
-/****************************************************************************************/
-
-LVCS_ReturnStatus_en LVCS_Memory(LVCS_Handle_t hInstance,
- LVCS_MemTab_t *pMemoryTable,
- LVCS_Capabilities_t *pCapabilities)
-{
-
- LVM_UINT32 ScratchSize;
- LVCS_Instance_t *pInstance = (LVCS_Instance_t *)hInstance;
-
- /*
- * Fill in the memory table
- */
- if (hInstance == LVM_NULL)
- {
- /*
- * Instance memory
- */
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_SLOW_DATA].Size = (LVM_UINT32)sizeof(LVCS_Instance_t);
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_SLOW_DATA].Type = LVCS_PERSISTENT;
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_SLOW_DATA].pBaseAddress = LVM_NULL;
-
- /*
- * Data memory
- */
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].Size = (LVM_UINT32)sizeof(LVCS_Data_t);
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].Type = LVCS_DATA;
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress = LVM_NULL;
-
- /*
- * Coefficient memory
- */
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].Size = (LVM_UINT32)sizeof(LVCS_Coefficient_t);
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].Type = LVCS_COEFFICIENT;
- pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress = LVM_NULL;
-
- /*
- * Scratch memory
- */
- /* Inplace processing */
- ScratchSize = (LVM_UINT32) \
- (LVCS_SCRATCHBUFFERS * sizeof(LVM_FLOAT) * pCapabilities->MaxBlockSize);
- pMemoryTable->Region[LVCS_MEMREGION_TEMPORARY_FAST].Size = ScratchSize;
- pMemoryTable->Region[LVCS_MEMREGION_TEMPORARY_FAST].Type = LVCS_SCRATCH;
- pMemoryTable->Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress = LVM_NULL;
- }
- else
- {
- /* Read back memory allocation table */
- *pMemoryTable = pInstance->MemoryTable;
- }
-
- return(LVCS_SUCCESS);
-}
-
/************************************************************************************/
/* */
/* FUNCTION: LVCS_Init */
@@ -120,46 +32,38 @@
/* DESCRIPTION: */
/* Create and initialisation function for the Concert Sound module */
/* */
-/* This function can be used to create an algorithm instance by calling with */
-/* hInstance set to LVM_NULL. In this case the algorithm returns the new instance */
-/* handle. */
-/* */
-/* This function can be used to force a full re-initialisation of the algorithm */
-/* by calling with hInstance = Instance Handle. In this case the memory table */
-/* should be correct for the instance, this can be ensured by calling the function */
-/* LVCS_Memory before calling this function. */
-/* */
/* PARAMETERS: */
-/* hInstance Instance handle */
-/* pMemoryTable Pointer to the memory definition table */
+/* phInstance Pointer to instance handle */
/* pCapabilities Pointer to the capabilities structure */
+/* pScratch Pointer to scratch buffer */
/* */
/* RETURNS: */
/* LVCS_Success Initialisation succeeded */
+/* LVCS_NULLADDRESS One or more memory buffers are NULL - allocation failure */
/* */
/* NOTES: */
-/* 1. The instance handle is the pointer to the base address of the first memory */
-/* region. */
-/* 2. This function must not be interrupted by the LVCS_Process function */
-/* 3. This function must be called with the same capabilities as used for the */
-/* call to the memory function */
+/* 1. This function must not be interrupted by the LVCS_Process function */
/* */
/************************************************************************************/
LVCS_ReturnStatus_en LVCS_Init(LVCS_Handle_t *phInstance,
- LVCS_MemTab_t *pMemoryTable,
- LVCS_Capabilities_t *pCapabilities)
+ LVCS_Capabilities_t *pCapabilities,
+ void *pScratch)
{
- LVCS_Instance_t *pInstance;
- LVCS_VolCorrect_t *pLVCS_VolCorrectTable;
+ LVCS_Instance_t *pInstance;
+ LVCS_VolCorrect_t *pLVCS_VolCorrectTable;
/*
- * Set the instance handle if not already initialised
+ * Create the instance handle if not already initialised
*/
if (*phInstance == LVM_NULL)
{
- *phInstance = (LVCS_Handle_t)pMemoryTable->Region[LVCS_MEMREGION_PERSISTENT_SLOW_DATA].pBaseAddress;
+ *phInstance = calloc(1, sizeof(*pInstance));
+ }
+ if (*phInstance == LVM_NULL)
+ {
+ return LVCS_NULLADDRESS;
}
pInstance =(LVCS_Instance_t *)*phInstance;
@@ -168,10 +72,7 @@
*/
pInstance->Capabilities = *pCapabilities;
- /*
- * Save the memory table in the instance structure
- */
- pInstance->MemoryTable = *pMemoryTable;
+ pInstance->pScratch = pScratch;
/*
* Set all initial parameters to invalid to force a full initialisation
@@ -208,3 +109,35 @@
return(LVCS_SUCCESS);
}
+/************************************************************************************/
+/* */
+/* FUNCTION: LVCS_DeInit */
+/* */
+/* DESCRIPTION: */
+/* Free memory allocated in the LVCS_Init call, including the instance handle */
+/* */
+/* PARAMETERS: */
+/* phInstance Pointer to instance handle */
+/* */
+/* NOTES: */
+/* 1. This function must not be interrupted by the LVCS_Process function */
+/* */
+/************************************************************************************/
+void LVCS_DeInit(LVCS_Handle_t *phInstance)
+{
+ LVCS_Instance_t *pInstance = (LVCS_Instance_t *)*phInstance;
+ if (pInstance == LVM_NULL) {
+ return;
+ }
+ if (pInstance->pCoeff != LVM_NULL) {
+ free(pInstance->pCoeff);
+ pInstance->pCoeff = LVM_NULL;
+ }
+ if (pInstance->pData != LVM_NULL) {
+ free(pInstance->pData);
+ pInstance->pData = LVM_NULL;
+ }
+ free(pInstance);
+ *phInstance = LVM_NULL;
+ return;
+}
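With LVCS_Memory() deleted, nothing in this file computes the scratch size any more; the bundle that calls LVCS_Init is expected to hand in a buffer at least as large as the old LVCS_MEMREGION_TEMPORARY_FAST region. A hedged sketch of that sizing, reusing the formula from the removed code (the helper name is illustrative, and LVCS_SCRATCHBUFFERS lives in LVCS_Private.h, so an external caller would use its own equivalent constant):

    // Scratch sizing carried over from the removed LVCS_MEMREGION_TEMPORARY_FAST entry.
    static size_t lvcsScratchBytes(const LVCS_Capabilities_t *pCapabilities) {
        return (size_t)LVCS_SCRATCHBUFFERS * sizeof(LVM_FLOAT) * pCapabilities->MaxBlockSize;
    }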
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index 154ea55..7adfb50 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -54,12 +54,7 @@
#define LVCS_COMPGAINFRAME 64 /* Compressor gain update interval */
/* Memory */
-#ifdef SUPPORT_MC
#define LVCS_SCRATCHBUFFERS 8 /* Number of buffers required for inplace processing */
-#else
-#define LVCS_SCRATCHBUFFERS 6 /* Number of buffers required for inplace processing */
-#endif
-#ifdef SUPPORT_MC
/*
* The Concert Surround module applies processing only on the first two
* channels of a multichannel input. The data of first two channels is copied
@@ -67,7 +62,6 @@
* are used for this purpose
*/
#define LVCS_MC_SCRATCHBUFFERS 2
-#endif
/* General */
#define LVCS_INVALID 0xFFFF /* Invalid init parameter */
@@ -110,7 +104,6 @@
typedef struct
{
/* Public parameters */
- LVCS_MemTab_t MemoryTable; /* Instance memory allocation table */
LVCS_Params_t Params; /* Instance parameters */
LVCS_Capabilities_t Capabilities; /* Initialisation capabilities */
@@ -133,6 +126,9 @@
LVM_INT16 bTimerDone; /* Timer completion flag */
LVM_Timer_Params_t TimerParams; /* Timer parameters */
LVM_Timer_Instance_t TimerInstance; /* Timer instance */
+ void *pCoeff; /* Pointer to buffer for equaliser filter coeffs */
+ void *pData; /* Pointer to buffer for equaliser filter states */
+ void *pScratch; /* Pointer to bundle scratch buffer */
} LVCS_Instance_t;
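Ownership of the three new pointers differs, which is easy to misread from the struct alone: pData and pCoeff are allocated lazily inside the library and released by LVCS_DeInit(), while pScratch belongs to the bundle and is never freed here. A small illustrative sketch of how the LVCS modules now reach these buffers (the helper is hypothetical; it assumes access to LVCS_Private.h):

    // Accessing the instance buffers after this change (illustrative only).
    static void lvcsBuffers(LVCS_Handle_t hInstance) {
        LVCS_Instance_t *pInstance = (LVCS_Instance_t *)hInstance;
        LVCS_Data_t *pData = (LVCS_Data_t *)pInstance->pData;                        // library-owned
        LVCS_Coefficient_t *pCoefficients = (LVCS_Coefficient_t *)pInstance->pCoeff; // library-owned
        LVM_FLOAT *pScratch = (LVM_FLOAT *)pInstance->pScratch;                      // caller-owned
        (void)pData; (void)pCoefficients; (void)pScratch;
    }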
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
index 8e09be2..72b4c8b 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
@@ -74,7 +74,6 @@
LVCS_Instance_t *pInstance = (LVCS_Instance_t *)hInstance;
LVM_FLOAT *pScratch;
LVCS_ReturnStatus_en err;
-#ifdef SUPPORT_MC
LVM_FLOAT *pStIn;
LVM_INT32 channels = pInstance->Params.NrChannels;
#define NrFrames NumSamples // alias for clarity
@@ -89,15 +88,12 @@
{
channels = 2;
}
-#endif
- pScratch = (LVM_FLOAT *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress;
+ pScratch = (LVM_FLOAT *)pInstance->pScratch;
/*
* Check if the processing is inplace
*/
-#ifdef SUPPORT_MC
/*
* The pInput buffer holds the first 2 (Left, Right) channels information.
* Hence the memory required by this buffer is 2 * NumFrames.
@@ -115,35 +111,13 @@
Copy_Float((LVM_FLOAT *)pInput,
(LVM_FLOAT *)pStIn,
(LVM_INT16)(2 * NrFrames));
-#else
- if (pInData == pOutData)
- {
- /* Processing inplace */
- pInput = pScratch + (2 * NumSamples);
- Copy_Float((LVM_FLOAT *)pInData, /* Source */
- (LVM_FLOAT *)pInput, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Left and right */
- }
- else
- {
- /* Processing outplace */
- pInput = pInData;
- }
-#endif
/*
* Call the stereo enhancer
*/
-#ifdef SUPPORT_MC
err = LVCS_StereoEnhancer(hInstance, /* Instance handle */
pStIn, /* Pointer to the input data */
pOutData, /* Pointer to the output data */
NrFrames); /* Number of frames to process */
-#else
- err = LVCS_StereoEnhancer(hInstance, /* Instance handle */
- pInData, /* Pointer to the input data */
- pOutData, /* Pointer to the output data */
- NumSamples); /* Number of samples to process */
-#endif
/*
* Call the reverb generator
@@ -210,7 +184,6 @@
LVCS_Instance_t *pInstance = (LVCS_Instance_t *)hInstance;
LVCS_ReturnStatus_en err;
-#ifdef SUPPORT_MC
/*Extract number of Channels info*/
LVM_INT32 channels = pInstance->Params.NrChannels;
#define NrFrames NumSamples // alias for clarity
@@ -218,7 +191,6 @@
{
channels = 2;
}
-#endif
/*
* Check the number of samples is not too large
*/
@@ -232,7 +204,6 @@
*/
if (pInstance->Params.OperatingMode != LVCS_OFF)
{
-#ifdef SUPPORT_MC
LVM_FLOAT *pStereoOut;
/*
* LVCS_Process_CS uses output buffer to store intermediate outputs of StereoEnhancer,
@@ -248,10 +219,8 @@
* second and fourth are used as input buffers by pInput and pStIn in LVCS_Process_CS.
* Hence, pStereoOut is pointed to use unused third portion of scratch memory.
*/
- pStereoOut = (LVM_FLOAT *) \
- pInstance->MemoryTable. \
- Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress +
- ((LVCS_SCRATCHBUFFERS - 4) * NrFrames);
+ pStereoOut = (LVM_FLOAT *)pInstance->pScratch +
+ ((LVCS_SCRATCHBUFFERS - 4) * NrFrames);
}
else
{
@@ -265,12 +234,6 @@
pInData,
pStereoOut,
NrFrames);
-#else
- err = LVCS_Process_CS(hInstance,
- pInData,
- pOutData,
- NumSamples);
-#endif
/*
* Compress to reduce expansion effect of Concert Sound and correct volume
@@ -289,17 +252,10 @@
if(NumSamples < LVCS_COMPGAINFRAME)
{
-#ifdef SUPPORT_MC
NonLinComp_Float(Gain, /* Compressor gain setting */
pStereoOut,
pStereoOut,
(LVM_INT32)(2 * NrFrames));
-#else
- NonLinComp_Float(Gain, /* Compressor gain setting */
- pOutData,
- pOutData,
- (LVM_INT32)(2 * NumSamples));
-#endif
}
else
{
@@ -328,11 +284,7 @@
FinalGain = Gain;
Gain = pInstance->CompressGain;
-#ifdef SUPPORT_MC
pOutPtr = pStereoOut;
-#else
- pOutPtr = pOutData;
-#endif
while(SampleToProcess > 0)
{
@@ -396,33 +348,22 @@
(LVM_INT16)NumSamples);
}
}
-#ifdef SUPPORT_MC
Copy_Float_Stereo_Mc(pInData,
pStereoOut,
pOutData,
NrFrames,
channels);
-#endif
}
else
{
if (pInData != pOutData)
{
-#ifdef SUPPORT_MC
/*
* The algorithm is disabled so just copy the data
*/
Copy_Float((LVM_FLOAT *)pInData, /* Source */
(LVM_FLOAT *)pOutData, /* Destination */
(LVM_INT16)(channels * NrFrames)); /* All Channels*/
-#else
- /*
- * The algorithm is disabled so just copy the data
- */
- Copy_Float((LVM_FLOAT *)pInData, /* Source */
- (LVM_FLOAT *)pOutData, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Left and right */
-#endif
}
}
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
index d0e6e09..441b667 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
@@ -20,7 +20,7 @@
/* Includes */
/* */
/************************************************************************************/
-
+#include <stdlib.h>
#include "LVCS.h"
#include "LVCS_Private.h"
#include "LVCS_ReverbGenerator.h"
@@ -70,11 +70,31 @@
BQ_FLOAT_Coefs_t Coeffs;
const BiquadA012B12CoefsSP_t *pReverbCoefTable;
- pData = (LVCS_Data_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress;
-
- pCoefficients = (LVCS_Coefficient_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
+ if (pInstance->pData == LVM_NULL)
+ {
+ pInstance->pData = pData = (LVCS_Data_t *)calloc(1, sizeof(*pData));
+ if (pData == LVM_NULL)
+ {
+ return LVCS_NULLADDRESS;
+ }
+ }
+ else
+ {
+ pData = (LVCS_Data_t *)pInstance->pData;
+ }
+ if (pInstance->pCoeff == LVM_NULL)
+ {
+ pInstance->pCoeff = pCoefficients = (LVCS_Coefficient_t *)calloc(1, \
+ sizeof(*pCoefficients));
+ if (pCoefficients == LVM_NULL)
+ {
+ return LVCS_NULLADDRESS;
+ }
+ }
+ else
+ {
+ pCoefficients = (LVCS_Coefficient_t *)pInstance->pCoeff;
+ }
/*
* Initialise the delay and filters if:
@@ -192,11 +212,8 @@
LVCS_Coefficient_t *pCoefficients;
LVM_FLOAT *pScratch;
- pCoefficients = (LVCS_Coefficient_t *)\
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
-
- pScratch = (LVM_FLOAT *)\
- pInstance->MemoryTable.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress;
+ pCoefficients = (LVCS_Coefficient_t *)pInstance->pCoeff;
+ pScratch = (LVM_FLOAT *)pInstance->pScratch;
/*
* Copy the data to the output in outplace processing
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
index 7fd8444..6929015 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
@@ -62,11 +62,8 @@
BQ_FLOAT_Coefs_t CoeffsSide;
const BiquadA012B12CoefsSP_t *pSESideCoefs;
- pData = (LVCS_Data_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_DATA].pBaseAddress;
-
- pCoefficient = (LVCS_Coefficient_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
+ pData = (LVCS_Data_t *)pInstance->pData;
+ pCoefficient = (LVCS_Coefficient_t *)pInstance->pCoeff;
/*
* If the sample rate or speaker type has changed update the filters
@@ -188,12 +185,8 @@
LVCS_StereoEnhancer_t *pConfig = (LVCS_StereoEnhancer_t *)&pInstance->StereoEnhancer;
LVCS_Coefficient_t *pCoefficient;
LVM_FLOAT *pScratch;
-
- pCoefficient = (LVCS_Coefficient_t *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_PERSISTENT_FAST_COEF].pBaseAddress;
-
- pScratch = (LVM_FLOAT *) \
- pInstance->MemoryTable.Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress;
+ pCoefficient = (LVCS_Coefficient_t *)pInstance->pCoeff;
+ pScratch = (LVM_FLOAT *)pInstance->pScratch;
/*
* Check if the Stereo Enhancer is enabled
*/
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests_reverb.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests_reverb.sh
index 5a972db..0c3b0b5 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests_reverb.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests_reverb.sh
@@ -41,47 +41,65 @@
192000
)
+flags_arr=(
+ "--M --fch 1"
+ "--fch 2"
+)
+
# run reverb at different configs, saving only the stereo channel
# pair.
error_count=0
+testcase_count=0
for cmd in "${cmds[@]}"
do
$cmd
- for preset_val in {0..6}
+ for flags in "${flags_arr[@]}"
do
- for fs in ${fs_arr[*]}
+ for preset_val in {0..6}
do
- for chMask in {1..22}
+ for fs in ${fs_arr[*]}
do
- adb shell LD_LIBRARY_PATH=/system/vendor/lib/soundfx $testdir/reverb_test \
- --input $testdir/sinesweepraw.raw \
- --output $testdir/sinesweep_$((chMask))_$((fs)).raw \
- --chMask $chMask --fs $fs --preset $preset_val
+ for chMask in {0..22}
+ do
+ adb shell LD_LIBRARY_PATH=/system/vendor/lib/soundfx $testdir/reverb_test \
+ --input $testdir/sinesweepraw.raw \
+ --output $testdir/sinesweep_$((chMask))_$((fs)).raw \
+ --chMask $chMask $flags --fs $fs --preset $preset_val
- shell_ret=$?
- if [ $shell_ret -ne 0 ]; then
- echo "error: $shell_ret"
- ((++error_count))
- fi
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
+ fi
- # two channel files should be identical to higher channel
- # computation (first 2 channels).
- if [[ "$chMask" -gt 1 ]]
- then
- adb shell cmp $testdir/sinesweep_1_$((fs)).raw \
- $testdir/sinesweep_$((chMask))_$((fs)).raw
- fi
- # cmp returns EXIT_FAILURE on mismatch.
- shell_ret=$?
- if [ $shell_ret -ne 0 ]; then
- echo "error: $shell_ret"
- ((++error_count))
- fi
+ if [[ "$chMask" -gt 0 ]] && [[ $flags != *"--fch 2"* ]]
+ then
+ # single channel files should be identical to higher channel
+ # computation (first channel).
+ adb shell cmp $testdir/sinesweep_0_$((fs)).raw \
+ $testdir/sinesweep_$((chMask))_$((fs)).raw
+ elif [[ "$chMask" -gt 1 ]]
+ then
+ # two channel files should be identical to higher channel
+ # computation (first 2 channels).
+ adb shell cmp $testdir/sinesweep_1_$((fs)).raw \
+ $testdir/sinesweep_$((chMask))_$((fs)).raw
+ fi
+
+ # cmp returns EXIT_FAILURE on mismatch.
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
+ fi
+ ((++testcase_count))
+ done
done
done
done
done
adb shell rm -r $testdir
+echo "$testcase_count tests performed"
echo "$error_count errors"
exit $error_count
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index a4ace6c..59b27ad 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -182,49 +182,6 @@
printf("\n Enable Equalizer");
}
-//----------------------------------------------------------------------------
-// LvmEffect_free()
-//----------------------------------------------------------------------------
-// Purpose: Free all memory associated with the Bundle.
-//
-// Inputs:
-// pContext: effect engine context
-//
-// Outputs:
-//
-//----------------------------------------------------------------------------
-
-void LvmEffect_free(struct EffectContext *pContext) {
- LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
- LVM_MemTab_t MemTab;
-
- /* Free the algorithm memory */
- LvmStatus = LVM_GetMemoryTable(pContext->pBundledContext->hInstance, &MemTab,
- LVM_NULL);
-
- LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmEffect_free")
-
- for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
- if (MemTab.Region[i].Size != 0) {
- if (MemTab.Region[i].pBaseAddress != NULL) {
- ALOGV("\tLvmEffect_free - START freeing %" PRIu32
- " bytes for region %u at %p\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
-
- free(MemTab.Region[i].pBaseAddress);
-
- ALOGV("\tLvmEffect_free - END freeing %" PRIu32
- " bytes for region %u at %p\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- } else {
- ALOGE(
- "\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer "
- "%" PRIu32 " bytes for region %u at %p ERROR\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- }
- }
- }
-} /* end LvmEffect_free */
//----------------------------------------------------------------------------
// LvmBundle_init()
@@ -263,8 +220,7 @@
ALOGV(
"\tLvmBundle_init pContext->pBassBoost != NULL "
"-> Calling pContext->pBassBoost->free()");
-
- LvmEffect_free(pContext);
+ LVM_DelInstanceHandle(&pContext->pBundledContext->hInstance);
ALOGV(
"\tLvmBundle_init pContext->pBassBoost != NULL "
@@ -276,8 +232,6 @@
LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS]; /* Equaliser band definitions */
LVM_HeadroomParams_t HeadroomParams; /* Headroom parameters */
LVM_HeadroomBandDef_t HeadroomBandDef[LVM_HEADROOM_MAX_NBANDS];
- LVM_MemTab_t MemTab; /* Memory allocation table */
- bool bMallocFailure = LVM_FALSE;
/* Set the capabilities */
InstParams.BufferMode = LVM_UNMANAGED_BUFFERS;
@@ -285,63 +239,8 @@
InstParams.EQNB_NumBands = MAX_NUM_BANDS;
InstParams.PSA_Included = LVM_PSA_ON;
- /* Allocate memory, forcing alignment */
- LvmStatus = LVM_GetMemoryTable(LVM_NULL, &MemTab, &InstParams);
-
- LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmBundle_init");
- if (LvmStatus != LVM_SUCCESS) return -EINVAL;
-
- ALOGV("\tCreateInstance Succesfully called LVM_GetMemoryTable\n");
-
- /* Allocate memory */
- for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
- if (MemTab.Region[i].Size != 0) {
- MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
-
- if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
- ALOGE(
- "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
- "%" PRIu32 " bytes for region %u\n",
- MemTab.Region[i].Size, i);
- bMallocFailure = LVM_TRUE;
- break;
- } else {
- ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32
- " bytes for region %u at %p\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- }
- }
- }
-
- /* If one or more of the memory regions failed to allocate, free the regions
- * that were
- * succesfully allocated and return with an error
- */
- if (bMallocFailure == LVM_TRUE) {
- for (int i = 0; i < LVM_NR_MEMORY_REGIONS; i++) {
- if (MemTab.Region[i].pBaseAddress == LVM_NULL) {
- ALOGE(
- "\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate "
- "%" PRIu32 " bytes for region %u Not freeing\n",
- MemTab.Region[i].Size, i);
- } else {
- ALOGE(
- "\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated "
- "%" PRIu32 " bytes for region %u at %p- free\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- free(MemTab.Region[i].pBaseAddress);
- }
- }
- return -EINVAL;
- }
- ALOGV("\tLvmBundle_init CreateInstance Succesfully malloc'd memory\n");
-
- /* Initialise */
- pContext->pBundledContext->hInstance = LVM_NULL;
-
- /* Init sets the instance handle */
LvmStatus = LVM_GetInstanceHandle(&pContext->pBundledContext->hInstance,
- &MemTab, &InstParams);
+ &InstParams);
LVM_ERROR_CHECK(LvmStatus, "LVM_GetInstanceHandle", "LvmBundle_init");
if (LvmStatus != LVM_SUCCESS) return -EINVAL;
@@ -812,7 +711,7 @@
/* Free the allocated buffers */
if (context.pBundledContext != nullptr) {
if (context.pBundledContext->hInstance != nullptr) {
- LvmEffect_free(&context);
+ LVM_DelInstanceHandle(&context.pBundledContext->hInstance);
}
free(context.pBundledContext);
}
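With LvmEffect_free() removed from the test as well, the bundle lifecycle reduces to a create/destroy pair on the LVM handle. The sketch below assumes the new two-argument LVM_GetInstanceHandle() shown in this hunk allocates internally and that LVM_DelInstanceHandle() releases everything; fields not shown in the diff are left at defaults.

    #include <errno.h>
    #include "LVM.h"

    static int bundleCreateDestroy() {
        LVM_Handle_t     hInstance  = LVM_NULL;
        LVM_InstParams_t InstParams = {};
        InstParams.BufferMode    = LVM_UNMANAGED_BUFFERS;
        InstParams.EQNB_NumBands = MAX_NUM_BANDS;   // constant from the bundle headers
        InstParams.PSA_Included  = LVM_PSA_ON;      // remaining fields elided in this sketch

        if (LVM_GetInstanceHandle(&hInstance, &InstParams) != LVM_SUCCESS) {
            return -EINVAL;                          // mirrors the LvmBundle_init error path
        }
        // ... LVM_SetControlParameters() / LVM_Process() as before ...
        LVM_DelInstanceHandle(&hInstance);           // single call frees all engine memory
        return 0;
    }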
diff --git a/media/libeffects/lvm/tests/reverb_test.cpp b/media/libeffects/lvm/tests/reverb_test.cpp
index a9cf348..f403229 100644
--- a/media/libeffects/lvm/tests/reverb_test.cpp
+++ b/media/libeffects/lvm/tests/reverb_test.cpp
@@ -297,6 +297,9 @@
config.inputCfg.samplingRate = config.outputCfg.samplingRate = revConfigParams.sampleRate;
config.inputCfg.channels = config.outputCfg.channels = revConfigParams.chMask;
config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ if (AUDIO_CHANNEL_OUT_MONO == revConfigParams.chMask) {
+ config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
if (int status =
reverbCreateEffect(&effectHandle, &config, sessionId, ioId, revConfigParams.auxiliary);
status != 0) {
@@ -332,15 +335,15 @@
* Mono input will be converted to 2 channels internally in the process call
* by copying the same data into the second channel.
* Hence when channelCount is 1, output buffer should be allocated for
- * 2 channels. The memAllocChCount takes care of allocation of sufficient
+ * 2 channels. The outChannelCount takes care of allocation of sufficient
* memory for the output buffer.
*/
- const int memAllocChCount = (channelCount == 1 ? 2 : channelCount);
+ const int outChannelCount = (channelCount == 1 ? 2 : channelCount);
std::vector<short> in(frameLength * maxChannelCount);
std::vector<short> out(frameLength * maxChannelCount);
std::vector<float> floatIn(frameLength * channelCount);
- std::vector<float> floatOut(frameLength * memAllocChCount);
+ std::vector<float> floatOut(frameLength * outChannelCount);
int frameCounter = 0;
@@ -374,11 +377,11 @@
#else
memcpy(floatOut.data(), floatIn.data(), frameLength * frameSize);
#endif
- memcpy_to_i16_from_float(out.data(), floatOut.data(), frameLength * channelCount);
+ memcpy_to_i16_from_float(out.data(), floatOut.data(), frameLength * outChannelCount);
- if (ioChannelCount != channelCount) {
- adjust_channels(out.data(), channelCount, out.data(), ioChannelCount, sizeof(short),
- frameLength * channelCount * sizeof(short));
+ if (ioChannelCount != outChannelCount) {
+ adjust_channels(out.data(), outChannelCount, out.data(), ioChannelCount, sizeof(short),
+ frameLength * outChannelCount * sizeof(short));
}
(void)fwrite(out.data(), ioFrameSize, frameLength, outputFp.get());
frameCounter += frameLength;
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index afc4220..f08caec 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -13,7 +13,6 @@
cppflags: [
"-fvisibility=hidden",
- "-DSUPPORT_MC",
"-Wall",
"-Werror",
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 6fca0e7..dac283e 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -136,7 +136,6 @@
int LvmBundle_init (EffectContext *pContext);
int LvmEffect_enable (EffectContext *pContext);
int LvmEffect_disable (EffectContext *pContext);
-void LvmEffect_free (EffectContext *pContext);
int Effect_setConfig (EffectContext *pContext, effect_config_t *pConfig);
void Effect_getConfig (EffectContext *pContext, effect_config_t *pConfig);
int BassBoost_setParameter (EffectContext *pContext,
@@ -433,7 +432,7 @@
pSessionContext->bBundledEffectsEnabled = LVM_FALSE;
pSessionContext->pBundledContext = LVM_NULL;
ALOGV("\tEffectRelease: Freeing LVM Bundle memory\n");
- LvmEffect_free(pContext);
+ LVM_DelInstanceHandle(&pContext->pBundledContext->hInstance);
ALOGV("\tEffectRelease: Deleting LVM Bundle context %p\n", pContext->pBundledContext);
if (pContext->pBundledContext->workBuffer != NULL) {
free(pContext->pBundledContext->workBuffer);
@@ -529,8 +528,7 @@
if (pContext->pBundledContext->hInstance != NULL){
ALOGV("\tLvmBundle_init pContext->pBassBoost != NULL "
"-> Calling pContext->pBassBoost->free()");
-
- LvmEffect_free(pContext);
+ LVM_DelInstanceHandle(&pContext->pBundledContext->hInstance);
ALOGV("\tLvmBundle_init pContext->pBassBoost != NULL "
"-> Called pContext->pBassBoost->free()");
@@ -542,8 +540,6 @@
LVM_EQNB_BandDef_t BandDefs[MAX_NUM_BANDS]; /* Equaliser band definitions */
LVM_HeadroomParams_t HeadroomParams; /* Headroom parameters */
LVM_HeadroomBandDef_t HeadroomBandDef[LVM_HEADROOM_MAX_NBANDS];
- LVM_MemTab_t MemTab; /* Memory allocation table */
- bool bMallocFailure = LVM_FALSE;
/* Set the capabilities */
InstParams.BufferMode = LVM_UNMANAGED_BUFFERS;
@@ -551,58 +547,7 @@
InstParams.EQNB_NumBands = MAX_NUM_BANDS;
InstParams.PSA_Included = LVM_PSA_ON;
- /* Allocate memory, forcing alignment */
- LvmStatus = LVM_GetMemoryTable(LVM_NULL,
- &MemTab,
- &InstParams);
-
- LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmBundle_init")
- if(LvmStatus != LVM_SUCCESS) return -EINVAL;
-
- ALOGV("\tCreateInstance Succesfully called LVM_GetMemoryTable\n");
-
- /* Allocate memory */
- for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
- if (MemTab.Region[i].Size != 0){
- MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
-
- if (MemTab.Region[i].pBaseAddress == LVM_NULL){
- ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32
- " bytes for region %u\n", MemTab.Region[i].Size, i );
- bMallocFailure = LVM_TRUE;
- }else{
- ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32
- " bytes for region %u at %p\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- }
- }
- }
-
- /* If one or more of the memory regions failed to allocate, free the regions that were
- * succesfully allocated and return with an error
- */
- if(bMallocFailure == LVM_TRUE){
- for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
- if (MemTab.Region[i].pBaseAddress == LVM_NULL){
- ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32
- " bytes for region %u Not freeing\n", MemTab.Region[i].Size, i );
- }else{
- ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated %" PRIu32
- " bytes for region %u at %p- free\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- free(MemTab.Region[i].pBaseAddress);
- }
- }
- return -EINVAL;
- }
- ALOGV("\tLvmBundle_init CreateInstance Succesfully malloc'd memory\n");
-
- /* Initialise */
- pContext->pBundledContext->hInstance = LVM_NULL;
-
- /* Init sets the instance handle */
LvmStatus = LVM_GetInstanceHandle(&pContext->pBundledContext->hInstance,
- &MemTab,
&InstParams);
LVM_ERROR_CHECK(LvmStatus, "LVM_GetInstanceHandle", "LvmBundle_init")
@@ -618,9 +563,7 @@
params.SpeakerType = LVM_HEADPHONES;
pContext->pBundledContext->SampleRate = LVM_FS_44100;
-#ifdef SUPPORT_MC
pContext->pBundledContext->ChMask = AUDIO_CHANNEL_OUT_STEREO;
-#endif
/* Concert Sound parameters */
params.VirtualizerOperatingMode = LVM_MODE_OFF;
@@ -666,11 +609,9 @@
params.TE_OperatingMode = LVM_TE_OFF;
params.TE_EffectLevel = 0;
-#ifdef SUPPORT_MC
params.NrChannels =
audio_channel_count_from_out_mask(AUDIO_CHANNEL_OUT_STEREO);
params.ChMask = AUDIO_CHANNEL_OUT_STEREO;
-#endif
/* Activate the initial settings */
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance,
¶ms);
@@ -1030,41 +971,6 @@
return 0;
}
-//----------------------------------------------------------------------------
-// LvmEffect_free()
-//----------------------------------------------------------------------------
-// Purpose: Free all memory associated with the Bundle.
-//
-// Inputs:
-// pContext: effect engine context
-//
-// Outputs:
-//
-//----------------------------------------------------------------------------
-
-void LvmEffect_free(EffectContext *pContext){
- LVM_ReturnStatus_en LvmStatus=LVM_SUCCESS; /* Function call status */
- LVM_MemTab_t MemTab;
-
- /* Free the algorithm memory */
- LvmStatus = LVM_GetMemoryTable(pContext->pBundledContext->hInstance,
- &MemTab,
- LVM_NULL);
-
- LVM_ERROR_CHECK(LvmStatus, "LVM_GetMemoryTable", "LvmEffect_free")
-
- for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
- if (MemTab.Region[i].Size != 0){
- if (MemTab.Region[i].pBaseAddress != NULL){
- free(MemTab.Region[i].pBaseAddress);
- }else{
- ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %" PRIu32
- " bytes for region %u at %p ERROR\n",
- MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
- }
- }
- }
-} /* end LvmEffect_free */
//----------------------------------------------------------------------------
// Effect_setConfig()
@@ -1090,11 +996,7 @@
CHECK_ARG(pConfig->inputCfg.samplingRate == pConfig->outputCfg.samplingRate);
CHECK_ARG(pConfig->inputCfg.channels == pConfig->outputCfg.channels);
CHECK_ARG(pConfig->inputCfg.format == pConfig->outputCfg.format);
-#ifdef SUPPORT_MC
CHECK_ARG(audio_channel_count_from_out_mask(pConfig->inputCfg.channels) <= LVM_MAX_CHANNELS);
-#else
- CHECK_ARG(pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
-#endif
CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
|| pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
@@ -1147,12 +1049,8 @@
return -EINVAL;
}
-#ifdef SUPPORT_MC
if (pContext->pBundledContext->SampleRate != SampleRate ||
pContext->pBundledContext->ChMask != pConfig->inputCfg.channels) {
-#else
- if(pContext->pBundledContext->SampleRate != SampleRate){
-#endif
LVM_ControlParams_t ActiveParams;
LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS;
@@ -1168,19 +1066,15 @@
ActiveParams.SampleRate = SampleRate;
-#ifdef SUPPORT_MC
ActiveParams.NrChannels = NrChannels;
ActiveParams.ChMask = pConfig->inputCfg.channels;
-#endif
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
ALOGV("\tEffect_setConfig Succesfully called LVM_SetControlParameters\n");
pContext->pBundledContext->SampleRate = SampleRate;
-#ifdef SUPPORT_MC
pContext->pBundledContext->ChMask = pConfig->inputCfg.channels;
-#endif
LvmEffect_limitLevel(pContext);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 524e103..63bc45c 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -94,9 +94,7 @@
int frameCount;
int32_t bandGaindB[FIVEBAND_NUMBANDS];
int volume;
-#ifdef SUPPORT_MC
LVM_INT32 ChMask;
-#endif
/* Bitmask whether drain is in progress due to disabling the effect.
The corresponding bit to an effect is set by 1 << lvm_effect_en. */
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 39f5bb6..4411a7d 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -259,6 +259,7 @@
int channels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
+ channels = pContext->auxiliary ? channels : FCC_2;
// Allocate memory for reverb process (*2 is for STEREO)
pContext->bufferSizeIn = LVREV_MAX_FRAME_SIZE * sizeof(process_buffer_t) * channels;
pContext->bufferSizeOut = LVREV_MAX_FRAME_SIZE * sizeof(process_buffer_t) * FCC_2;
@@ -343,9 +344,9 @@
int channels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
LVREV_ReturnStatus_en LvmStatus = LVREV_SUCCESS; /* Function call status */
- // Check that the input is either mono or stereo
- if (!(channels == 1 || channels == FCC_2) ) {
- ALOGE("\tLVREV_ERROR : process invalid PCM format");
+ // Reverb only affects the first two (stereo) channels of a multichannel source.
+ if (channels < 1 || channels > LVM_MAX_CHANNELS) {
+ ALOGE("\tLVREV_ERROR : process invalid PCM channels %d", channels);
return -EINVAL;
}
@@ -380,11 +381,20 @@
static_assert(std::is_same<decltype(*pIn), decltype(*pContext->InFrames)>::value,
"pIn and InFrames must be same type");
memcpy(pContext->InFrames, pIn, frameCount * channels * sizeof(*pIn));
+ } else {
+ // insert reverb: use the first two input channels; mono input is duplicated below
+ if (channels >= FCC_2) {
+ for (int i = 0; i < frameCount; i++) {
+ pContext->InFrames[FCC_2 * i] =
+ (process_buffer_t)pIn[channels * i] * REVERB_SEND_LEVEL;
+ pContext->InFrames[FCC_2 * i + 1] =
+ (process_buffer_t)pIn[channels * i + 1] * REVERB_SEND_LEVEL;
+ }
} else {
- // insert reverb input is always stereo
- for (int i = 0; i < frameCount; i++) {
- pContext->InFrames[2 * i] = (process_buffer_t)pIn[2 * i] * REVERB_SEND_LEVEL;
- pContext->InFrames[2 * i + 1] = (process_buffer_t)pIn[2 * i + 1] * REVERB_SEND_LEVEL;
+ for (int i = 0; i < frameCount; i++) {
+ pContext->InFrames[FCC_2 * i] = pContext->InFrames[FCC_2 * i + 1] =
+ (process_buffer_t)pIn[i] * REVERB_SEND_LEVEL;
+ }
}
}
@@ -412,9 +422,18 @@
if (pContext->auxiliary) {
// nothing to do here
} else {
- for (int i = 0; i < frameCount * FCC_2; i++) { // always stereo here
- // Mix with dry input
- pContext->OutFrames[i] += pIn[i];
+ if (channels >= FCC_2) {
+ for (int i = 0; i < frameCount; i++) {
+ // Mix with dry input
+ pContext->OutFrames[FCC_2 * i] += pIn[channels * i];
+ pContext->OutFrames[FCC_2 * i + 1] += pIn[channels * i + 1];
+ }
+ } else {
+ for (int i = 0; i < frameCount; i++) {
+ // Mix with dry input
+ pContext->OutFrames[FCC_2 * i] += pIn[i];
+ pContext->OutFrames[FCC_2 * i + 1] += pIn[i];
+ }
}
// apply volume with ramp if needed
if ((pContext->leftVolume != pContext->prevLeftVolume ||
@@ -450,18 +469,33 @@
}
}
-
- // Accumulate if required
- if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
- //ALOGV("\tBuffer access is ACCUMULATE");
- for (int i = 0; i < frameCount * FCC_2; i++) { // always stereo here
- pOut[i] += pContext->OutFrames[i];
+ if (channels > 2) {
+ // Accumulate if required
+ if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (int i = 0; i < frameCount; i++) {
+ pOut[channels * i] += pContext->OutFrames[FCC_2 * i];
+ pOut[channels * i + 1] += pContext->OutFrames[FCC_2 * i + 1];
+ }
+ } else {
+ for (int i = 0; i < frameCount; i++) {
+ pOut[channels * i] = pContext->OutFrames[FCC_2 * i];
+ pOut[channels * i + 1] = pContext->OutFrames[FCC_2 * i + 1];
+ }
}
- }else{
- //ALOGV("\tBuffer access is WRITE");
- memcpy(pOut, pContext->OutFrames, frameCount * sizeof(*pOut) * FCC_2);
+ for (int i = 0; i < frameCount; i++) {
+ for (int j = FCC_2; j < channels; j++) {
+ pOut[channels * i + j] = pIn[channels * i + j];
+ }
+ }
+ } else {
+ if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (int i = 0; i < frameCount * FCC_2; i++) {
+ pOut[i] += pContext->OutFrames[i];
+ }
+ } else {
+ memcpy(pOut, pContext->OutFrames, frameCount * sizeof(*pOut) * FCC_2);
+ }
}
-
return 0;
} /* end process */
@@ -525,9 +559,12 @@
CHECK_ARG(pConfig->inputCfg.samplingRate == pConfig->outputCfg.samplingRate);
CHECK_ARG(pConfig->inputCfg.format == pConfig->outputCfg.format);
+ int inputChannels = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
CHECK_ARG((pContext->auxiliary && pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_MONO) ||
- ((!pContext->auxiliary) && pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO));
- CHECK_ARG(pConfig->outputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
+ ((!pContext->auxiliary) &&
+ (inputChannels <= LVM_MAX_CHANNELS)));
+ int outputChannels = audio_channel_count_from_out_mask(pConfig->outputCfg.channels);
+ CHECK_ARG(outputChannels >= FCC_2 && outputChannels <= LVM_MAX_CHANNELS);
CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
|| pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
@@ -691,7 +728,7 @@
/* Allocate memory */
for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
if (MemTab.Region[i].Size != 0){
- MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
+ MemTab.Region[i].pBaseAddress = calloc(1, MemTab.Region[i].Size);
if (MemTab.Region[i].pBaseAddress == LVM_NULL){
ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32
@@ -749,6 +786,9 @@
params.SourceFormat = LVM_STEREO;
}
+ if ((pContext->auxiliary == false) && (params.SourceFormat == LVM_MONO)) {
+ params.SourceFormat = LVM_STEREO;
+ }
/* Reverb parameters */
params.Level = 0;
params.LPF = 23999;
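Taken together, the setConfig and process changes above mean an insert reverb can now be attached to any layout up to LVM_MAX_CHANNELS: the engine still runs in stereo, channels 0/1 receive the wet mix, and the remaining channels are copied through dry. A configuration sketch for a 5.1 stream (field names as used in reverb_test.cpp earlier in this patch; not a complete effect setup):

    #include <hardware/audio_effect.h>
    #include <system/audio.h>

    // Sketch: insert reverb on a 5.1 stream under the relaxed channel checks.
    static effect_config_t make51ReverbConfig() {
        effect_config_t config = {};
        config.inputCfg.samplingRate = config.outputCfg.samplingRate = 48000;
        config.inputCfg.format       = config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
        config.inputCfg.channels     = AUDIO_CHANNEL_OUT_5POINT1;  // 6 channels in
        config.outputCfg.channels    = AUDIO_CHANNEL_OUT_5POINT1;  // >= stereo, <= LVM_MAX_CHANNELS
        config.outputCfg.accessMode  = EFFECT_BUFFER_ACCESS_WRITE;
        // process() applies reverb to channels 0/1 and passes channels 2..5 through dry.
        return config;
    }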
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 1caee04..39523de 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -49,28 +49,6 @@
path: "aidl",
}
-filegroup {
- name: "resourcemanager_aidl",
- srcs: [
- "aidl/android/media/IResourceManagerClient.aidl",
- "aidl/android/media/IResourceManagerService.aidl",
- "aidl/android/media/MediaResourceType.aidl",
- "aidl/android/media/MediaResourceSubType.aidl",
- "aidl/android/media/MediaResourceParcel.aidl",
- "aidl/android/media/MediaResourcePolicyParcel.aidl",
- ],
- path: "aidl",
-}
-
-aidl_interface {
- name: "resourcemanager_aidl_interface",
- unstable: true,
- local_include_dir: "aidl",
- srcs: [
- ":resourcemanager_aidl",
- ],
-}
-
cc_library_shared {
name: "libmedia_omx",
vendor_available: true,
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index bd18a40..11005c6 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -62,11 +62,13 @@
}
virtual sp<IMediaPlayer> create(
- const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId) {
+ const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId,
+ const std::string opPackageName) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(client));
data.writeInt32(audioSessionId);
+ data.writeCString(opPackageName.c_str());
remote()->transact(CREATE, data, &reply);
return interface_cast<IMediaPlayer>(reply.readStrongBinder());
@@ -127,7 +129,12 @@
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
audio_session_t audioSessionId = (audio_session_t) data.readInt32();
- sp<IMediaPlayer> player = create(client, audioSessionId);
+ const char* opPackageName = data.readCString();
+ if (opPackageName == nullptr) {
+ return FAILED_TRANSACTION;
+ }
+ std::string opPackageNameStr(opPackageName);
+ sp<IMediaPlayer> player = create(client, audioSessionId, opPackageNameStr);
reply->writeStrongBinder(IInterface::asBinder(player));
return NO_ERROR;
} break;
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 1be82d8..e8839ba 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -240,7 +240,10 @@
const size_t nMappings = sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]);
const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createVideoCodec failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::VideoCodec *videoCodec =
new MediaProfiles::VideoCodec(static_cast<video_encoder>(codec),
@@ -262,7 +265,10 @@
!strcmp("channels", atts[6]));
const size_t nMappings = sizeof(sAudioEncoderNameMap)/sizeof(sAudioEncoderNameMap[0]);
const int codec = findTagForName(sAudioEncoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createAudioCodec failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::AudioCodec *audioCodec =
new MediaProfiles::AudioCodec(static_cast<audio_encoder>(codec),
@@ -282,7 +288,10 @@
const size_t nMappings = sizeof(sAudioDecoderNameMap)/sizeof(sAudioDecoderNameMap[0]);
const int codec = findTagForName(sAudioDecoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createAudioDecoderCap failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::AudioDecoderCap *cap =
new MediaProfiles::AudioDecoderCap(static_cast<audio_decoder>(codec));
@@ -298,7 +307,10 @@
const size_t nMappings = sizeof(sVideoDecoderNameMap)/sizeof(sVideoDecoderNameMap[0]);
const int codec = findTagForName(sVideoDecoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createVideoDecoderCap failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::VideoDecoderCap *cap =
new MediaProfiles::VideoDecoderCap(static_cast<video_decoder>(codec));
@@ -322,7 +334,10 @@
const size_t nMappings = sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]);
const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createVideoEncoderCap failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::VideoEncoderCap *cap =
new MediaProfiles::VideoEncoderCap(static_cast<video_encoder>(codec),
@@ -346,7 +361,10 @@
const size_t nMappings = sizeof(sAudioEncoderNameMap)/sizeof(sAudioEncoderNameMap[0]);
const int codec = findTagForName(sAudioEncoderNameMap, nMappings, atts[1]);
- CHECK(codec != -1);
+ if (codec == -1) {
+ ALOGE("MediaProfiles::createAudioEncoderCap failed to locate codec %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::AudioEncoderCap *cap =
new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]),
@@ -386,11 +404,17 @@
const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/
sizeof(sCamcorderQualityNameMap[0]);
const int quality = findTagForName(sCamcorderQualityNameMap, nProfileMappings, atts[1]);
- CHECK(quality != -1);
+ if (quality == -1) {
+ ALOGE("MediaProfiles::createCamcorderProfile failed to locate quality %s", atts[1]);
+ return nullptr;
+ }
const size_t nFormatMappings = sizeof(sFileFormatMap)/sizeof(sFileFormatMap[0]);
const int fileFormat = findTagForName(sFileFormatMap, nFormatMappings, atts[3]);
- CHECK(fileFormat != -1);
+ if (fileFormat == -1) {
+ ALOGE("MediaProfiles::createCamcorderProfile failed to locate file format %s", atts[1]);
+ return nullptr;
+ }
MediaProfiles::CamcorderProfile *profile = new MediaProfiles::CamcorderProfile;
profile->mCameraId = cameraId;
@@ -462,24 +486,39 @@
createAudioCodec(atts, profiles);
} else if (strcmp("VideoEncoderCap", name) == 0 &&
strcmp("true", atts[3]) == 0) {
- profiles->mVideoEncoders.add(createVideoEncoderCap(atts));
+ MediaProfiles::VideoEncoderCap* cap = createVideoEncoderCap(atts);
+ if (cap != nullptr) {
+ profiles->mVideoEncoders.add(cap);
+ }
} else if (strcmp("AudioEncoderCap", name) == 0 &&
strcmp("true", atts[3]) == 0) {
- profiles->mAudioEncoders.add(createAudioEncoderCap(atts));
+ MediaProfiles::AudioEncoderCap* cap = createAudioEncoderCap(atts);
+ if (cap != nullptr) {
+ profiles->mAudioEncoders.add(cap);
+ }
} else if (strcmp("VideoDecoderCap", name) == 0 &&
strcmp("true", atts[3]) == 0) {
- profiles->mVideoDecoders.add(createVideoDecoderCap(atts));
+ MediaProfiles::VideoDecoderCap* cap = createVideoDecoderCap(atts);
+ if (cap != nullptr) {
+ profiles->mVideoDecoders.add(cap);
+ }
} else if (strcmp("AudioDecoderCap", name) == 0 &&
strcmp("true", atts[3]) == 0) {
- profiles->mAudioDecoders.add(createAudioDecoderCap(atts));
+ MediaProfiles::AudioDecoderCap* cap = createAudioDecoderCap(atts);
+ if (cap != nullptr) {
+ profiles->mAudioDecoders.add(cap);
+ }
} else if (strcmp("EncoderOutputFileFormat", name) == 0) {
profiles->mEncoderOutputFileFormats.add(createEncoderOutputFileFormat(atts));
} else if (strcmp("CamcorderProfiles", name) == 0) {
profiles->mCurrentCameraId = getCameraId(atts);
profiles->addStartTimeOffset(profiles->mCurrentCameraId, atts);
} else if (strcmp("EncoderProfile", name) == 0) {
- profiles->mCamcorderProfiles.add(
- createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds));
+ MediaProfiles::CamcorderProfile* profile = createCamcorderProfile(
+ profiles->mCurrentCameraId, atts, profiles->mCameraIds);
+ if (profile != nullptr) {
+ profiles->mCamcorderProfiles.add(profile);
+ }
} else if (strcmp("ImageEncoding", name) == 0) {
profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts);
}
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index fe86d27..ec52a49 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -43,11 +43,11 @@
}
//static
-MediaResource MediaResource::CodecResource(bool secure, bool video) {
+MediaResource MediaResource::CodecResource(bool secure, bool video, int64_t instanceCount) {
return MediaResource(
secure ? Type::kSecureCodec : Type::kNonSecureCodec,
video ? SubType::kVideoCodec : SubType::kAudioCodec,
- 1);
+ instanceCount);
}
//static
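The extra instanceCount argument keeps existing call sites source-compatible (it defaults to 1 in the header change below) while letting a caller reserve several codec instances in one MediaResource. A short usage sketch:

    #include <media/MediaResource.h>

    // e.g. a session that needs three non-secure video codec instances at once:
    android::MediaResource res =
            android::MediaResource::CodecResource(false /* secure */,
                                                  true  /* video */,
                                                  3     /* instanceCount */);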
diff --git a/media/libmedia/include/media/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
index f2e2060..a4207eb 100644
--- a/media/libmedia/include/media/IMediaPlayerService.h
+++ b/media/libmedia/include/media/IMediaPlayerService.h
@@ -28,6 +28,8 @@
#include <media/IMediaPlayerClient.h>
#include <media/IMediaMetadataRetriever.h>
+#include <string>
+
namespace android {
class IMediaPlayer;
@@ -47,7 +49,8 @@
virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
- audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE) = 0;
+ audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE,
+ const std::string opPackage = "") = 0;
virtual sp<IMediaCodecList> getCodecList() const = 0;
// Connects to a remote display.
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index 4927d28..4712528 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -37,7 +37,7 @@
MediaResource(Type type, SubType subType, int64_t value);
MediaResource(Type type, const std::vector<uint8_t> &id, int64_t value);
- static MediaResource CodecResource(bool secure, bool video);
+ static MediaResource CodecResource(bool secure, bool video, int64_t instanceCount = 1);
static MediaResource GraphicMemoryResource(int64_t value);
static MediaResource CpuBoostResource();
static MediaResource VideoBatteryResource();
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index 0073375..71c0bc5 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -33,6 +33,8 @@
#include <utils/KeyedVector.h>
#include <utils/String8.h>
+#include <string>
+
struct ANativeWindow;
namespace android {
@@ -209,7 +211,7 @@
public virtual IMediaDeathNotifier
{
public:
- MediaPlayer();
+ MediaPlayer(const std::string opPackageName = "");
~MediaPlayer();
void died();
void disconnect();
@@ -313,6 +315,7 @@
float mSendLevel;
struct sockaddr_in mRetransmitEndpoint;
bool mRetransmitEndpointValid;
+ const std::string mOpPackageName;
};
}; // namespace android
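On the client side, the new constructor argument is the only visible change: an app process can attribute its playback by passing its package name when it constructs the MediaPlayer, and that name then rides along on every IMediaPlayerService::create() call shown in mediaplayer.cpp below. A sketch, with a placeholder package name:

    #include <media/mediaplayer.h>

    // Attribute playback to the calling app; "com.example.app" is a placeholder.
    android::sp<android::MediaPlayer> player =
            new android::MediaPlayer(std::string("com.example.app"));
    // Subsequent setDataSource() calls create the server-side player with this package name.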
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 1b89fc7..30c5006 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -41,7 +41,7 @@
using media::VolumeShaper;
-MediaPlayer::MediaPlayer()
+MediaPlayer::MediaPlayer(const std::string opPackageName) : mOpPackageName(opPackageName)
{
ALOGV("constructor");
mListener = NULL;
@@ -152,7 +152,7 @@
if (url != NULL) {
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(httpService, url, headers))) {
player.clear();
@@ -169,7 +169,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(fd, offset, length))) {
player.clear();
@@ -185,7 +185,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(source))) {
player.clear();
@@ -201,7 +201,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(rtpParams))) {
player.clear();
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 555f459..4d90d98 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -480,14 +480,14 @@
}
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
- audio_session_t audioSessionId)
+ audio_session_t audioSessionId, std::string opPackageName)
{
pid_t pid = IPCThreadState::self()->getCallingPid();
int32_t connId = android_atomic_inc(&mNextConnId);
sp<Client> c = new Client(
this, pid, connId, client, audioSessionId,
- IPCThreadState::self()->getCallingUid());
+ IPCThreadState::self()->getCallingUid(), opPackageName);
ALOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid,
IPCThreadState::self()->getCallingUid());
@@ -733,7 +733,8 @@
MediaPlayerService::Client::Client(
const sp<MediaPlayerService>& service, pid_t pid,
int32_t connId, const sp<IMediaPlayerClient>& client,
- audio_session_t audioSessionId, uid_t uid)
+ audio_session_t audioSessionId, uid_t uid, const std::string& opPackageName)
+ : mOpPackageName(opPackageName)
{
ALOGV("Client(%d) constructor", connId);
mPid = pid;
@@ -922,7 +923,7 @@
if (!p->hardwareOutput()) {
mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
- mPid, mAudioAttributes, mAudioDeviceUpdatedListener);
+ mPid, mAudioAttributes, mAudioDeviceUpdatedListener, mOpPackageName);
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
@@ -1772,7 +1773,8 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
- const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
+ const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback,
+ const std::string& opPackageName)
: mCallback(NULL),
mCallbackCookie(NULL),
mCallbackData(NULL),
@@ -1793,7 +1795,8 @@
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
mDeviceCallbackEnabled(false),
- mDeviceCallback(deviceCallback)
+ mDeviceCallback(deviceCallback),
+ mOpPackageName(opPackageName)
{
ALOGV("AudioOutput(%d)", sessionId);
if (attr != NULL) {
@@ -2187,7 +2190,8 @@
mAttributes,
doNotReconnect,
1.0f, // default value for maxRequiredSpeed
- mSelectedDeviceId);
+ mSelectedDeviceId,
+ mOpPackageName);
} else {
// TODO: Due to buffer memory concerns, we use a max target playback speed
// based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
@@ -2215,7 +2219,8 @@
mAttributes,
doNotReconnect,
targetSpeed,
- mSelectedDeviceId);
+ mSelectedDeviceId,
+ mOpPackageName);
}
// Set caller name so it can be logged in destructor.
// MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_MEDIA
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 3d596a5..b2f1b9b 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -19,6 +19,7 @@
#define ANDROID_MEDIAPLAYERSERVICE_H
#include <arpa/inet.h>
+#include <string>
#include <utils/threads.h>
#include <utils/Errors.h>
@@ -81,7 +82,8 @@
uid_t uid,
int pid,
const audio_attributes_t * attr,
- const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
+ const sp<AudioSystem::AudioDeviceCallback>& deviceCallback,
+ const std::string& opPackageName);
virtual ~AudioOutput();
virtual bool ready() const { return mTrack != 0; }
@@ -178,6 +180,7 @@
bool mDeviceCallbackEnabled;
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
mutable Mutex mLock;
+ const std::string mOpPackageName;
// static variables below not protected by mutex
static bool mIsOnEmulator;
@@ -235,7 +238,8 @@
virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
- audio_session_t audioSessionId);
+ audio_session_t audioSessionId,
+ const std::string opPackageName);
virtual sp<IMediaCodecList> getCodecList() const;
@@ -411,7 +415,8 @@
int32_t connId,
const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId,
- uid_t uid);
+ uid_t uid,
+ const std::string& opPackageName);
Client();
virtual ~Client();
@@ -468,6 +473,7 @@
bool mRetransmitEndpointValid;
sp<Client> mNextClient;
sp<MediaPlayerBase::Listener> mListener;
+ const std::string mOpPackageName;
// Metadata filters.
media::Metadata::Filter mMetadataAllow; // protected by mLock
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 9b1974b..1cc255d 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -28,6 +28,7 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryHeapBase.h>
#include <binder/MemoryBase.h>
+#include <camera/CameraUtils.h>
#include <codec2/hidl/client.h>
#include <cutils/atomic.h>
#include <cutils/properties.h> // for property_get
@@ -423,30 +424,35 @@
sp<IServiceManager> sm = defaultServiceManager();
- // WORKAROUND: We don't know if camera exists here and getService might block for 5 seconds.
- // Use checkService for camera if we don't know it exists.
- static std::atomic<bool> sCameraChecked(false); // once true never becomes false.
- static std::atomic<bool> sCameraVerified(false); // once true never becomes false.
- sp<IBinder> binder = (sCameraVerified || !sCameraChecked)
- ? sm->getService(String16("media.camera")) : sm->checkService(String16("media.camera"));
- // If the device does not have a camera, do not create a death listener for it.
- if (binder != NULL) {
- sCameraVerified = true;
- mDeathNotifiers.emplace_back(
- binder, [l = wp<IMediaRecorderClient>(listener)](){
- sp<IMediaRecorderClient> listener = l.promote();
- if (listener) {
- ALOGV("media.camera service died. "
- "Sending death notification.");
- listener->notify(
- MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED,
- MediaPlayerService::CAMERA_PROCESS_DEATH);
- } else {
- ALOGW("media.camera service died without a death handler.");
- }
- });
+ static const bool sCameraDisabled = CameraUtils::isCameraServiceDisabled();
+
+ if (!sCameraDisabled) {
+ // WORKAROUND: We don't know if camera exists here and getService might block for 5 seconds.
+ // Use checkService for camera if we don't know it exists.
+ static std::atomic<bool> sCameraChecked(false); // once true never becomes false.
+ static std::atomic<bool> sCameraVerified(false); // once true never becomes false.
+
+ sp<IBinder> binder = (sCameraVerified || !sCameraChecked)
+ ? sm->getService(String16("media.camera")) : sm->checkService(String16("media.camera"));
+ // If the device does not have a camera, do not create a death listener for it.
+ if (binder != NULL) {
+ sCameraVerified = true;
+ mDeathNotifiers.emplace_back(
+ binder, [l = wp<IMediaRecorderClient>(listener)](){
+ sp<IMediaRecorderClient> listener = l.promote();
+ if (listener) {
+ ALOGV("media.camera service died. "
+ "Sending death notification.");
+ listener->notify(
+ MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED,
+ MediaPlayerService::CAMERA_PROCESS_DEATH);
+ } else {
+ ALOGW("media.camera service died without a death handler.");
+ }
+ });
+ }
+ sCameraChecked = true;
}
- sCameraChecked = true;
{
using ::android::hidl::base::V1_0::IBase;
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
index f114046..c81a659 100644
--- a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -65,6 +65,14 @@
return true;
}
+ virtual bool overrideProcessInfo(
+ int /* pid */, int /* procState */, int /* oomScore */) {
+ return true;
+ }
+
+ virtual void removeProcessInfoOverride(int /* pid */) {
+ }
+
private:
DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
};
diff --git a/media/libmediatranscoding/Android.bp b/media/libmediatranscoding/Android.bp
index 29ed65a..128d0d8 100644
--- a/media/libmediatranscoding/Android.bp
+++ b/media/libmediatranscoding/Android.bp
@@ -14,11 +14,8 @@
* limitations under the License.
*/
-// AIDL interfaces of MediaTranscoding.
-aidl_interface {
- name: "mediatranscoding_aidl_interface",
- unstable: true,
- local_include_dir: "aidl",
+filegroup {
+ name: "libmediatranscoding_aidl",
srcs: [
"aidl/android/media/IMediaTranscodingService.aidl",
"aidl/android/media/ITranscodingClient.aidl",
@@ -34,6 +31,15 @@
"aidl/android/media/TranscodingResultParcel.aidl",
"aidl/android/media/TranscodingTestConfig.aidl",
],
+ path: "aidl",
+}
+
+// AIDL interfaces of MediaTranscoding.
+aidl_interface {
+ name: "mediatranscoding_aidl_interface",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [":libmediatranscoding_aidl"],
backend:
{
java: {
@@ -48,6 +54,7 @@
srcs: [
"TranscodingClientManager.cpp",
"TranscodingJobScheduler.cpp",
+ "TranscodingResourcePolicy.cpp",
"TranscodingUidPolicy.cpp",
"TranscoderWrapper.cpp",
],
@@ -61,11 +68,16 @@
"libbinder",
"libmediandk",
],
+ export_shared_lib_headers: [
+ "libmediandk",
+ ],
export_include_dirs: ["include"],
static_libs: [
"mediatranscoding_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk_platform",
+ "resourceobserver_aidl_interface-ndk_platform",
],
cflags: [
diff --git a/media/libmediatranscoding/TranscoderWrapper.cpp b/media/libmediatranscoding/TranscoderWrapper.cpp
index bd03671..8062fcf 100644
--- a/media/libmediatranscoding/TranscoderWrapper.cpp
+++ b/media/libmediatranscoding/TranscoderWrapper.cpp
@@ -89,26 +89,40 @@
}
//static
-const char* TranscoderWrapper::toString(Event::Type type) {
- switch (type) {
+std::string TranscoderWrapper::toString(const Event& event) {
+ std::string typeStr;
+ switch (event.type) {
case Event::Start:
- return "Start";
- case Event::Pause:
- return "Pause";
- case Event::Resume:
- return "Resume";
- case Event::Stop:
- return "Stop";
- case Event::Finish:
- return "Finish";
- case Event::Error:
- return "Error";
- case Event::Progress:
- return "Progress";
- default:
+ typeStr = "Start";
break;
+ case Event::Pause:
+ typeStr = "Pause";
+ break;
+ case Event::Resume:
+ typeStr = "Resume";
+ break;
+ case Event::Stop:
+ typeStr = "Stop";
+ break;
+ case Event::Finish:
+ typeStr = "Finish";
+ break;
+ case Event::Error:
+ typeStr = "Error";
+ break;
+ case Event::Progress:
+ typeStr = "Progress";
+ break;
+ default:
+ return "(unknown)";
}
- return "(unknown)";
+ std::string result;
+ result = "job {" + std::to_string(event.clientId) + "," + std::to_string(event.jobId) +
+ "}: " + typeStr;
+ if (event.type == Event::Error || event.type == Event::Progress) {
+ result += " " + std::to_string(event.arg);
+ }
+ return result;
}
class TranscoderWrapper::CallbackImpl : public MediaTranscoder::CallbackInterface {
@@ -128,7 +142,7 @@
media_status_t error) override {
auto owner = mOwner.lock();
if (owner != nullptr) {
- owner->onError(mClientId, mJobId, toTranscodingError(error));
+ owner->onError(mClientId, mJobId, error);
}
}
@@ -160,20 +174,41 @@
mCallback = cb;
}
+static bool isResourceError(media_status_t err) {
+ return err == AMEDIACODEC_ERROR_RECLAIMED || err == AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+}
+
+void TranscoderWrapper::reportError(ClientIdType clientId, JobIdType jobId, media_status_t err) {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ if (isResourceError(err)) {
+ // Add a placeholder pause state to mPausedStateMap. This is required when resuming.
+ // TODO: remove this when transcoder pause/resume logic is ready. New logic will
+ // no longer use the pause states.
+ auto it = mPausedStateMap.find(JobKeyType(clientId, jobId));
+ if (it == mPausedStateMap.end()) {
+ mPausedStateMap.emplace(JobKeyType(clientId, jobId),
+ std::shared_ptr<const Parcel>());
+ }
+
+ callback->onResourceLost();
+ } else {
+ callback->onError(clientId, jobId, toTranscodingError(err));
+ }
+ }
+}
+
void TranscoderWrapper::start(ClientIdType clientId, JobIdType jobId,
const TranscodingRequestParcel& request,
const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
queueEvent(Event::Start, clientId, jobId, [=] {
- TranscodingErrorCode err = handleStart(clientId, jobId, request, clientCb);
+ media_status_t err = handleStart(clientId, jobId, request, clientCb);
- auto callback = mCallback.lock();
- if (err != TranscodingErrorCode::kNoError) {
+ if (err != AMEDIA_OK) {
cleanup();
-
- if (callback != nullptr) {
- callback->onError(clientId, jobId, err);
- }
+ reportError(clientId, jobId, err);
} else {
+ auto callback = mCallback.lock();
if (callback != nullptr) {
callback->onStarted(clientId, jobId);
}
@@ -183,15 +218,15 @@
void TranscoderWrapper::pause(ClientIdType clientId, JobIdType jobId) {
queueEvent(Event::Pause, clientId, jobId, [=] {
- TranscodingErrorCode err = handlePause(clientId, jobId);
+ media_status_t err = handlePause(clientId, jobId);
cleanup();
- auto callback = mCallback.lock();
- if (callback != nullptr) {
- if (err != TranscodingErrorCode::kNoError) {
- callback->onError(clientId, jobId, err);
- } else {
+ if (err != AMEDIA_OK) {
+ reportError(clientId, jobId, err);
+ } else {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
callback->onPaused(clientId, jobId);
}
}
@@ -202,16 +237,13 @@
const TranscodingRequestParcel& request,
const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
queueEvent(Event::Resume, clientId, jobId, [=] {
- TranscodingErrorCode err = handleResume(clientId, jobId, request, clientCb);
+ media_status_t err = handleResume(clientId, jobId, request, clientCb);
- auto callback = mCallback.lock();
- if (err != TranscodingErrorCode::kNoError) {
+ if (err != AMEDIA_OK) {
cleanup();
-
- if (callback != nullptr) {
- callback->onError(clientId, jobId, err);
- }
+ reportError(clientId, jobId, err);
} else {
+ auto callback = mCallback.lock();
if (callback != nullptr) {
callback->onResumed(clientId, jobId);
}
@@ -225,7 +257,7 @@
// Cancelling the currently running job.
media_status_t err = mTranscoder->cancel();
if (err != AMEDIA_OK) {
- ALOGE("failed to stop transcoder: %d", err);
+ ALOGW("failed to stop transcoder: %d", err);
} else {
ALOGI("transcoder stopped");
}
@@ -251,41 +283,43 @@
});
}
-void TranscoderWrapper::onError(ClientIdType clientId, JobIdType jobId,
- TranscodingErrorCode error) {
- queueEvent(Event::Error, clientId, jobId, [=] {
- if (mTranscoder != nullptr && clientId == mCurrentClientId && jobId == mCurrentJobId) {
- cleanup();
- }
-
- auto callback = mCallback.lock();
- if (callback != nullptr) {
- callback->onError(clientId, jobId, error);
- }
- });
+void TranscoderWrapper::onError(ClientIdType clientId, JobIdType jobId, media_status_t error) {
+ queueEvent(
+ Event::Error, clientId, jobId,
+ [=] {
+ if (mTranscoder != nullptr && clientId == mCurrentClientId &&
+ jobId == mCurrentJobId) {
+ cleanup();
+ }
+ reportError(clientId, jobId, error);
+ },
+ error);
}
void TranscoderWrapper::onProgress(ClientIdType clientId, JobIdType jobId, int32_t progress) {
- queueEvent(Event::Progress, clientId, jobId, [=] {
- auto callback = mCallback.lock();
- if (callback != nullptr) {
- callback->onProgressUpdate(clientId, jobId, progress);
- }
- });
+ queueEvent(
+ Event::Progress, clientId, jobId,
+ [=] {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onProgressUpdate(clientId, jobId, progress);
+ }
+ },
+ progress);
}
-TranscodingErrorCode TranscoderWrapper::setupTranscoder(
+media_status_t TranscoderWrapper::setupTranscoder(
ClientIdType clientId, JobIdType jobId, const TranscodingRequestParcel& request,
const std::shared_ptr<ITranscodingClientCallback>& clientCb,
const std::shared_ptr<const Parcel>& pausedState) {
if (clientCb == nullptr) {
ALOGE("client callback is null");
- return TranscodingErrorCode::kInvalidParameter;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
}
if (mTranscoder != nullptr) {
ALOGE("transcoder already running");
- return TranscodingErrorCode::kInvalidOperation;
+ return AMEDIA_ERROR_INVALID_OPERATION;
}
Status status;
@@ -293,7 +327,7 @@
status = clientCb->openFileDescriptor(request.sourceFilePath, "r", &srcFd);
if (!status.isOk() || srcFd.get() < 0) {
ALOGE("failed to open source");
- return TranscodingErrorCode::kErrorIO;
+ return AMEDIA_ERROR_IO;
}
// Open dest file with "rw", as the transcoder could potentially reuse part of it
@@ -302,7 +336,7 @@
status = clientCb->openFileDescriptor(request.destinationFilePath, "rw", &dstFd);
if (!status.isOk() || dstFd.get() < 0) {
ALOGE("failed to open destination");
- return TranscodingErrorCode::kErrorIO;
+ return AMEDIA_ERROR_IO;
}
mCurrentClientId = clientId;
@@ -311,19 +345,19 @@
mTranscoder = MediaTranscoder::create(mTranscoderCb, pausedState);
if (mTranscoder == nullptr) {
ALOGE("failed to create transcoder");
- return TranscodingErrorCode::kUnknown;
+ return AMEDIA_ERROR_UNKNOWN;
}
media_status_t err = mTranscoder->configureSource(srcFd.get());
if (err != AMEDIA_OK) {
ALOGE("failed to configure source: %d", err);
- return toTranscodingError(err);
+ return err;
}
std::vector<std::shared_ptr<AMediaFormat>> trackFormats = mTranscoder->getTrackFormats();
if (trackFormats.size() == 0) {
ALOGE("failed to get track formats!");
- return TranscodingErrorCode::kMalformed;
+ return AMEDIA_ERROR_MALFORMED;
}
for (int i = 0; i < trackFormats.size(); ++i) {
@@ -341,43 +375,43 @@
}
if (err != AMEDIA_OK) {
ALOGE("failed to configure track format for track %d: %d", i, err);
- return toTranscodingError(err);
+ return err;
}
}
err = mTranscoder->configureDestination(dstFd.get());
if (err != AMEDIA_OK) {
ALOGE("failed to configure dest: %d", err);
- return toTranscodingError(err);
+ return err;
}
- return TranscodingErrorCode::kNoError;
+ return AMEDIA_OK;
}
-TranscodingErrorCode TranscoderWrapper::handleStart(
+media_status_t TranscoderWrapper::handleStart(
ClientIdType clientId, JobIdType jobId, const TranscodingRequestParcel& request,
const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
- ALOGI("setting up transcoder for start");
- TranscodingErrorCode err = setupTranscoder(clientId, jobId, request, clientCb);
- if (err != TranscodingErrorCode::kNoError) {
+ ALOGI("%s: setting up transcoder for start", __FUNCTION__);
+ media_status_t err = setupTranscoder(clientId, jobId, request, clientCb);
+ if (err != AMEDIA_OK) {
ALOGI("%s: failed to setup transcoder", __FUNCTION__);
return err;
}
- media_status_t status = mTranscoder->start();
- if (status != AMEDIA_OK) {
+ err = mTranscoder->start();
+ if (err != AMEDIA_OK) {
ALOGE("%s: failed to start transcoder: %d", __FUNCTION__, err);
- return toTranscodingError(status);
+ return err;
}
ALOGI("%s: transcoder started", __FUNCTION__);
- return TranscodingErrorCode::kNoError;
+ return AMEDIA_OK;
}
-TranscodingErrorCode TranscoderWrapper::handlePause(ClientIdType clientId, JobIdType jobId) {
+media_status_t TranscoderWrapper::handlePause(ClientIdType clientId, JobIdType jobId) {
if (mTranscoder == nullptr) {
ALOGE("%s: transcoder is not running", __FUNCTION__);
- return TranscodingErrorCode::kInvalidOperation;
+ return AMEDIA_ERROR_INVALID_OPERATION;
}
if (clientId != mCurrentClientId || jobId != mCurrentJobId) {
@@ -385,19 +419,21 @@
(long long)clientId, jobId, (long long)mCurrentClientId, mCurrentJobId);
}
+ ALOGI("%s: pausing transcoder", __FUNCTION__);
+
std::shared_ptr<const Parcel> pauseStates;
media_status_t err = mTranscoder->pause(&pauseStates);
if (err != AMEDIA_OK) {
ALOGE("%s: failed to pause transcoder: %d", __FUNCTION__, err);
- return toTranscodingError(err);
+ return err;
}
mPausedStateMap[JobKeyType(clientId, jobId)] = pauseStates;
ALOGI("%s: transcoder paused", __FUNCTION__);
- return TranscodingErrorCode::kNoError;
+ return AMEDIA_OK;
}
-TranscodingErrorCode TranscoderWrapper::handleResume(
+media_status_t TranscoderWrapper::handleResume(
ClientIdType clientId, JobIdType jobId, const TranscodingRequestParcel& request,
const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
std::shared_ptr<const Parcel> pausedState;
@@ -407,24 +443,24 @@
mPausedStateMap.erase(it);
} else {
ALOGE("%s: can't find paused state", __FUNCTION__);
- return TranscodingErrorCode::kInvalidOperation;
+ return AMEDIA_ERROR_INVALID_OPERATION;
}
- ALOGI("setting up transcoder for resume");
- TranscodingErrorCode err = setupTranscoder(clientId, jobId, request, clientCb, pausedState);
- if (err != TranscodingErrorCode::kNoError) {
- ALOGE("%s: failed to setup transcoder", __FUNCTION__);
+ ALOGI("%s: setting up transcoder for resume", __FUNCTION__);
+ media_status_t err = setupTranscoder(clientId, jobId, request, clientCb, pausedState);
+ if (err != AMEDIA_OK) {
+ ALOGE("%s: failed to setup transcoder: %d", __FUNCTION__, err);
return err;
}
- media_status_t status = mTranscoder->resume();
- if (status != AMEDIA_OK) {
+ err = mTranscoder->resume();
+ if (err != AMEDIA_OK) {
ALOGE("%s: failed to resume transcoder: %d", __FUNCTION__, err);
- return toTranscodingError(status);
+ return err;
}
ALOGI("%s: transcoder resumed", __FUNCTION__);
- return TranscodingErrorCode::kNoError;
+ return AMEDIA_OK;
}
void TranscoderWrapper::cleanup() {
@@ -435,12 +471,10 @@
}
void TranscoderWrapper::queueEvent(Event::Type type, ClientIdType clientId, JobIdType jobId,
- const std::function<void()> runnable) {
- ALOGV("%s: job {%lld, %d}: %s", __FUNCTION__, (long long)clientId, jobId, toString(type));
-
+ const std::function<void()> runnable, int32_t arg) {
std::scoped_lock lock{mLock};
- mQueue.push_back({type, clientId, jobId, runnable});
+ mQueue.push_back({type, clientId, jobId, runnable, arg});
mCondition.notify_one();
}
@@ -457,8 +491,7 @@
Event event = *mQueue.begin();
mQueue.pop_front();
- ALOGD("%s: job {%lld, %d}: %s", __FUNCTION__, (long long)event.clientId, event.jobId,
- toString(event.type));
+ ALOGD("%s: %s", __FUNCTION__, toString(event).c_str());
lock.unlock();
event.runnable();
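For reference, the reworked toString() folds the job key and, for Error/Progress events, the numeric argument into a single string, so threadLoop() can log an event in one line. A sketch of the resulting output, assuming clientId 1, jobId 2 and a 25% progress update (values hypothetical):

    // ALOGD("%s: %s", __FUNCTION__, toString(event).c_str());
    // -> "threadLoop: job {1,2}: Progress 25"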
diff --git a/media/libmediatranscoding/TranscodingClientManager.cpp b/media/libmediatranscoding/TranscodingClientManager.cpp
index ce3ac13..d9f3f28 100644
--- a/media/libmediatranscoding/TranscodingClientManager.cpp
+++ b/media/libmediatranscoding/TranscodingClientManager.cpp
@@ -23,6 +23,7 @@
#include <inttypes.h>
#include <media/TranscodingClientManager.h>
#include <media/TranscodingRequest.h>
+#include <private/android_filesystem_config.h>
#include <utils/Log.h>
namespace android {
@@ -44,6 +45,26 @@
TranscodingClientManager::sCookie2Client;
///////////////////////////////////////////////////////////////////////////////
+// Convenience macro for constructing binder::Status objects for error returns
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ Status::fromServiceSpecificErrorWithMessage( \
+ errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, ##__VA_ARGS__))
+
+// Can MediaTranscoding service trust the caller based on the calling UID?
+// TODO(hkuang): Add MediaProvider's UID.
+static bool isTrustedCallingUid(uid_t uid) {
+ switch (uid) {
+ case AID_ROOT: // root user
+ case AID_SYSTEM:
+ case AID_SHELL:
+ case AID_MEDIA: // mediaserver
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ClientImpl implements a single client and contains all its information.
*/
@@ -60,8 +81,6 @@
* (casted to int64t_t) as the client id.
*/
ClientIdType mClientId;
- pid_t mClientPid;
- uid_t mClientUid;
std::string mClientName;
std::string mClientOpPackageName;
@@ -72,7 +91,7 @@
// Weak pointer to the client manager for this client.
std::weak_ptr<TranscodingClientManager> mOwner;
- ClientImpl(const std::shared_ptr<ITranscodingClientCallback>& callback, pid_t pid, uid_t uid,
+ ClientImpl(const std::shared_ptr<ITranscodingClientCallback>& callback,
const std::string& clientName, const std::string& opPackageName,
const std::weak_ptr<TranscodingClientManager>& owner);
@@ -88,14 +107,11 @@
};
TranscodingClientManager::ClientImpl::ClientImpl(
- const std::shared_ptr<ITranscodingClientCallback>& callback, pid_t pid, uid_t uid,
- const std::string& clientName, const std::string& opPackageName,
- const std::weak_ptr<TranscodingClientManager>& owner)
+ const std::shared_ptr<ITranscodingClientCallback>& callback, const std::string& clientName,
+ const std::string& opPackageName, const std::weak_ptr<TranscodingClientManager>& owner)
: mClientBinder((callback != nullptr) ? callback->asBinder() : nullptr),
mClientCallback(callback),
mClientId(sCookieCounter.fetch_add(1, std::memory_order_relaxed)),
- mClientPid(pid),
- mClientUid(uid),
mClientName(clientName),
mClientOpPackageName(opPackageName),
mNextJobId(0),
@@ -113,14 +129,52 @@
}
if (in_request.sourceFilePath.empty() || in_request.destinationFilePath.empty()) {
- // This is the only error we check for now.
return Status::ok();
}
+ int32_t callingPid = AIBinder_getCallingPid();
+ int32_t callingUid = AIBinder_getCallingUid();
+ int32_t in_clientUid = in_request.clientUid;
+ int32_t in_clientPid = in_request.clientPid;
+
+ // Check if we can trust clientUid. Only a privileged caller may forward the
+ // uid on an app client's behalf.
+ if (in_clientUid == IMediaTranscodingService::USE_CALLING_UID) {
+ in_clientUid = callingUid;
+ } else if (in_clientUid < 0) {
+ return Status::ok();
+ } else if (in_clientUid != callingUid && !isTrustedCallingUid(callingUid)) {
+ ALOGE("MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(
+ IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ }
+
+ // Check if we can trust clientPid. Only a privileged caller may forward the
+ // pid on an app client's behalf.
+ if (in_clientPid == IMediaTranscodingService::USE_CALLING_PID) {
+ in_clientPid = callingPid;
+ } else if (in_clientPid < 0) {
+ return Status::ok();
+ } else if (in_clientPid != callingPid && !isTrustedCallingUid(callingUid)) {
+ ALOGE("MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(
+ IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ }
+
int32_t jobId = mNextJobId.fetch_add(1);
- *_aidl_return =
- owner->mJobScheduler->submit(mClientId, jobId, mClientUid, in_request, mClientCallback);
+ *_aidl_return = owner->mJobScheduler->submit(mClientId, jobId, in_clientUid, in_request,
+ mClientCallback);
if (*_aidl_return) {
out_job->jobId = jobId;
@@ -246,11 +300,10 @@
}
status_t TranscodingClientManager::addClient(
- const std::shared_ptr<ITranscodingClientCallback>& callback, pid_t pid, uid_t uid,
- const std::string& clientName, const std::string& opPackageName,
- std::shared_ptr<ITranscodingClient>* outClient) {
+ const std::shared_ptr<ITranscodingClientCallback>& callback, const std::string& clientName,
+ const std::string& opPackageName, std::shared_ptr<ITranscodingClient>* outClient) {
// Validate the client.
- if (callback == nullptr || pid < 0 || clientName.empty() || opPackageName.empty()) {
+ if (callback == nullptr || clientName.empty() || opPackageName.empty()) {
ALOGE("Invalid client");
return IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT;
}
@@ -264,12 +317,11 @@
return IMediaTranscodingService::ERROR_ALREADY_EXISTS;
}
- // Creates the client and uses its process id as client id.
+ // Creates the client (with the id assigned by ClientImpl).
std::shared_ptr<ClientImpl> client = ::ndk::SharedRefBase::make<ClientImpl>(
- callback, pid, uid, clientName, opPackageName, shared_from_this());
+ callback, clientName, opPackageName, shared_from_this());
- ALOGD("Adding client id %lld, pid %d, uid %d, name %s, package %s",
- (long long)client->mClientId, client->mClientPid, client->mClientUid,
+ ALOGD("Adding client id %lld, name %s, package %s", (long long)client->mClientId,
client->mClientName.c_str(), client->mClientOpPackageName.c_str());
{
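A sketch of how a non-privileged client is expected to fill the new uid/pid fields when submitting a request (paths and values are illustrative, not part of the patch). Leaving the fields at USE_CALLING_UID / USE_CALLING_PID makes the service substitute the caller's own ids; forwarding another app's ids only succeeds from a trusted uid (root, system, shell, media):

    TranscodingRequestParcel request;
    request.sourceFilePath = "/data/local/tmp/in.mp4";        // hypothetical paths
    request.destinationFilePath = "/data/local/tmp/out.mp4";
    request.clientUid = IMediaTranscodingService::USE_CALLING_UID;
    request.clientPid = IMediaTranscodingService::USE_CALLING_PID;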
diff --git a/media/libmediatranscoding/TranscodingJobScheduler.cpp b/media/libmediatranscoding/TranscodingJobScheduler.cpp
index 3e4f319..24ac682 100644
--- a/media/libmediatranscoding/TranscodingJobScheduler.cpp
+++ b/media/libmediatranscoding/TranscodingJobScheduler.cpp
@@ -38,8 +38,13 @@
TranscodingJobScheduler::TranscodingJobScheduler(
const std::shared_ptr<TranscoderInterface>& transcoder,
- const std::shared_ptr<UidPolicyInterface>& uidPolicy)
- : mTranscoder(transcoder), mUidPolicy(uidPolicy), mCurrentJob(nullptr), mResourceLost(false) {
+ const std::shared_ptr<UidPolicyInterface>& uidPolicy,
+ const std::shared_ptr<ResourcePolicyInterface>& resourcePolicy)
+ : mTranscoder(transcoder),
+ mUidPolicy(uidPolicy),
+ mResourcePolicy(resourcePolicy),
+ mCurrentJob(nullptr),
+ mResourceLost(false) {
// Only push empty offline queue initially. Realtime queues are added when requests come in.
mUidSortedList.push_back(OFFLINE_UID);
mOfflineUidIterator = mUidSortedList.begin();
@@ -398,15 +403,24 @@
}
void TranscodingJobScheduler::onResourceLost() {
- ALOGV("%s", __FUNCTION__);
+ ALOGI("%s", __FUNCTION__);
std::scoped_lock lock{mLock};
+ if (mResourceLost) {
+ return;
+ }
+
// If we receive a resource loss event, the TranscoderLibrary already paused
// the transcoding, so we don't need to call onPaused to notify it to pause.
// Only need to update the job state here.
if (mCurrentJob != nullptr && mCurrentJob->state == Job::RUNNING) {
mCurrentJob->state = Job::PAUSED;
+ // Notify the client as a paused event.
+ auto clientCallback = mCurrentJob->callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingPaused(mCurrentJob->key.second);
+ }
}
mResourceLost = true;
@@ -439,10 +453,14 @@
}
void TranscodingJobScheduler::onResourceAvailable() {
- ALOGV("%s", __FUNCTION__);
-
std::scoped_lock lock{mLock};
+ if (!mResourceLost) {
+ return;
+ }
+
+ ALOGI("%s", __FUNCTION__);
+
mResourceLost = false;
updateCurrentJob_l();
diff --git a/media/libmediatranscoding/TranscodingResourcePolicy.cpp b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
new file mode 100644
index 0000000..4fd8338
--- /dev/null
+++ b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TranscodingResourcePolicy"
+
+#include <aidl/android/media/BnResourceObserver.h>
+#include <aidl/android/media/IResourceObserverService.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/IServiceManager.h>
+#include <media/TranscodingResourcePolicy.h>
+#include <utils/Log.h>
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceObserver;
+using ::aidl::android::media::IResourceObserverService;
+using ::aidl::android::media::MediaObservableEvent;
+using ::aidl::android::media::MediaObservableFilter;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+
+static std::string toString(const MediaObservableParcel& observable) {
+ return "{" + ::aidl::android::media::toString(observable.type) + ", " +
+ std::to_string(observable.value) + "}";
+}
+
+struct TranscodingResourcePolicy::ResourceObserver : public BnResourceObserver {
+ explicit ResourceObserver(TranscodingResourcePolicy* owner) : mOwner(owner), mPid(getpid()) {}
+
+ // IResourceObserver
+ ::ndk::ScopedAStatus onStatusChanged(
+ MediaObservableEvent event, int32_t uid, int32_t pid,
+ const std::vector<MediaObservableParcel>& observables) override {
+ ALOGD("%s: %s, uid %d, pid %d, %s", __FUNCTION__,
+ ::aidl::android::media::toString(event).c_str(), uid, pid,
+ toString(observables[0]).c_str());
+
+ // Only report kIdle event for codec resources from other processes.
+ if (((uint64_t)event & (uint64_t)MediaObservableEvent::kIdle) != 0 && (pid != mPid)) {
+ for (auto& observable : observables) {
+ if (observable.type == MediaObservableType::kVideoSecureCodec ||
+ observable.type == MediaObservableType::kVideoNonSecureCodec) {
+ mOwner->onResourceAvailable();
+ break;
+ }
+ }
+ }
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ TranscodingResourcePolicy* mOwner;
+ const pid_t mPid;
+};
+
+// static
+void TranscodingResourcePolicy::BinderDiedCallback(void* cookie) {
+ TranscodingResourcePolicy* owner = reinterpret_cast<TranscodingResourcePolicy*>(cookie);
+ if (owner != nullptr) {
+ owner->unregisterSelf();
+ }
+ // TODO(chz): retry connecting to IResourceObserverService after failure.
+ // We also need back-up logic for the case where IResourceObserverService is offline
+ // for a prolonged period of time. A possible alternative: while IResourceObserverService
+ // is not available, trigger onResourceAvailable() every time the top uid changes,
+ // in the hope that this frees up some codec instances that we could reclaim.
+}
+
+TranscodingResourcePolicy::TranscodingResourcePolicy()
+ : mRegistered(false), mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)) {
+ registerSelf();
+}
+
+TranscodingResourcePolicy::~TranscodingResourcePolicy() {
+ unregisterSelf();
+}
+
+void TranscodingResourcePolicy::registerSelf() {
+ ALOGI("TranscodingResourcePolicy: registerSelf");
+
+ ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource_observer"));
+
+ std::scoped_lock lock{mRegisteredLock};
+
+ if (mRegistered) {
+ return;
+ }
+
+ // TODO(chz): retry connecting to IResourceObserverService after failure.
+ mService = IResourceObserverService::fromBinder(binder);
+ if (mService == nullptr) {
+ ALOGE("Failed to get IResourceObserverService");
+ return;
+ }
+
+ // Only register filters for codec-resource-available (kIdle) events.
+ mObserver = ::ndk::SharedRefBase::make<ResourceObserver>(this);
+ std::vector<MediaObservableFilter> filters = {
+ {MediaObservableType::kVideoSecureCodec, MediaObservableEvent::kIdle},
+ {MediaObservableType::kVideoNonSecureCodec, MediaObservableEvent::kIdle}};
+
+ Status status = mService->registerObserver(mObserver, filters);
+ if (!status.isOk()) {
+ ALOGE("failed to register: error %d", status.getServiceSpecificError());
+ mService = nullptr;
+ mObserver = nullptr;
+ return;
+ }
+
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), reinterpret_cast<void*>(this));
+
+ ALOGD("@@@ registered observer");
+ mRegistered = true;
+}
+
+void TranscodingResourcePolicy::unregisterSelf() {
+ ALOGI("TranscodingResourcePolicy: unregisterSelf");
+
+ std::scoped_lock lock{mRegisteredLock};
+
+ if (!mRegistered) {
+ return;
+ }
+
+ ::ndk::SpAIBinder binder = mService->asBinder();
+ if (binder.get() != nullptr) {
+ Status status = mService->unregisterObserver(mObserver);
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(), reinterpret_cast<void*>(this));
+ }
+
+ mService = nullptr;
+ mObserver = nullptr;
+ mRegistered = false;
+}
+
+void TranscodingResourcePolicy::setCallback(
+ const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) {
+ std::scoped_lock lock{mCallbackLock};
+ mResourcePolicyCallback = cb;
+}
+
+void TranscodingResourcePolicy::onResourceAvailable() {
+ std::shared_ptr<ResourcePolicyCallbackInterface> cb;
+ {
+ std::scoped_lock lock{mCallbackLock};
+ cb = mResourcePolicyCallback.lock();
+ }
+
+ if (cb != nullptr) {
+ cb->onResourceAvailable();
+ }
+}
+} // namespace android
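A rough wiring sketch for the new policy, assuming it is created by the service alongside the uid policy (variable names are illustrative; TranscodingJobScheduler's constructor is private, so this would run inside a friend such as MediaTranscodingService):

    auto resourcePolicy = std::make_shared<TranscodingResourcePolicy>();
    std::shared_ptr<TranscodingJobScheduler> scheduler(
            new TranscodingJobScheduler(transcoder, uidPolicy, resourcePolicy));
    // Route IResourceObserverService's kIdle events to the scheduler's
    // onResourceAvailable(), which resumes jobs paused on resource loss.
    resourcePolicy->setCallback(scheduler);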
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
index b72a2b9..fd41f65 100644
--- a/media/libmediatranscoding/TranscodingUidPolicy.cpp
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -17,6 +17,10 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "TranscodingUidPolicy"
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <aidl/android/media/IResourceManagerService.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
#include <binder/ActivityManager.h>
#include <cutils/misc.h> // FIRST_APPLICATION_UID
#include <inttypes.h>
@@ -30,6 +34,43 @@
constexpr static uid_t OFFLINE_UID = -1;
constexpr static const char* kTranscodingTag = "transcoding";
+/*
+ * The OOM score we're going to ask ResourceManager to use for our native transcoding
+ * service. ResourceManager issues reclaims based on these scores. It gets the scores
+ * from ActivityManagerService, which doesn't track native services. The values of the
+ * OOM scores are defined in:
+ * frameworks/base/services/core/java/com/android/server/am/ProcessList.java
+ * We use SERVICE_ADJ, which is lower priority than an app that might be visible to the
+ * user, but higher priority than a cached app (which could be killed without disruption
+ * to the user).
+ */
+constexpr static int32_t SERVICE_ADJ = 500;
+
+using Status = ::ndk::ScopedAStatus;
+using aidl::android::media::BnResourceManagerClient;
+using aidl::android::media::IResourceManagerService;
+
+/*
+ * Placeholder ResourceManagerClient for registering process info override
+ * with the IResourceManagerService. This is only used as a token by the service
+ * to get notifications about binder death, not used for reclaiming resources.
+ */
+struct TranscodingUidPolicy::ResourceManagerClient : public BnResourceManagerClient {
+ explicit ResourceManagerClient() = default;
+
+ Status reclaimResource(bool* _aidl_return) override {
+ *_aidl_return = false;
+ return Status::ok();
+ }
+
+ Status getName(::std::string* _aidl_return) override {
+ _aidl_return->clear();
+ return Status::ok();
+ }
+
+ virtual ~ResourceManagerClient() = default;
+};
+
struct TranscodingUidPolicy::UidObserver : public BnUidObserver,
public virtual IBinder::DeathRecipient {
explicit UidObserver(TranscodingUidPolicy* owner) : mOwner(owner) {}
@@ -74,6 +115,7 @@
mRegistered(false),
mTopUidState(ActivityManager::PROCESS_STATE_UNKNOWN) {
registerSelf();
+ setProcessInfoOverride();
}
TranscodingUidPolicy::~TranscodingUidPolicy() {
@@ -109,6 +151,22 @@
ALOGI("TranscodingUidPolicy: Unregistered with ActivityManager");
}
+void TranscodingUidPolicy::setProcessInfoOverride() {
+ ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource_manager"));
+ std::shared_ptr<IResourceManagerService> service = IResourceManagerService::fromBinder(binder);
+ if (service == nullptr) {
+ ALOGE("Failed to get IResourceManagerService");
+ return;
+ }
+
+ mProcInfoOverrideClient = ::ndk::SharedRefBase::make<ResourceManagerClient>();
+ Status status = service->overrideProcessInfo(
+ mProcInfoOverrideClient, getpid(), ActivityManager::PROCESS_STATE_SERVICE, SERVICE_ADJ);
+ if (!status.isOk()) {
+ ALOGW("Failed to setProcessInfoOverride.");
+ }
+}
+
void TranscodingUidPolicy::setUidObserverRegistered(bool registered) {
Mutex::Autolock _l(mUidLock);
diff --git a/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl b/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
index 40ca2c2..7fc7748 100644
--- a/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
+++ b/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
@@ -58,17 +58,13 @@
* the client.
* @param clientName name of the client.
* @param opPackageName op package name of the client.
- * @param clientUid user id of the client.
- * @param clientPid process id of the client.
* @return an ITranscodingClient interface object, with nullptr indicating
* failure to register.
*/
ITranscodingClient registerClient(
in ITranscodingClientCallback callback,
in String clientName,
- in String opPackageName,
- in int clientUid,
- in int clientPid);
+ in String opPackageName);
/**
* Returns the number of clients. This is used for debugging.
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
index 83ea707..14d19ba 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
@@ -39,6 +39,20 @@
@utf8InCpp String destinationFilePath;
/**
+ * The UID of the client that this transcoding request is for. Only a privileged caller may
+ * set this UID, as only privileged callers can transcode on behalf of another client.
+ * -1 means not available.
+ */
+ int clientUid = -1;
+
+ /**
+ * The PID of the client that this transcoding request is for. Only a privileged caller may
+ * set this PID, as only privileged callers can transcode on behalf of another client.
+ * -1 means not available.
+ */
+ int clientPid = -1;
+
+ /**
* Type of the transcoding.
*/
TranscodingType transcodingType;
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl
index a567a95..90502cd 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl
@@ -67,4 +67,18 @@
* -1 means unavailable.
*/
int level = -1;
+
+ /**
+ * Decoder operating rate. This is used to work around the fact that vendors do not boost the
+ * hardware to maximum speed for the transcoding use case. This operating rate will be applied
+ * to the decoder inside MediaTranscoder. -1 means unavailable.
+ */
+ int decoderOperatingRate = -1;
+
+ /**
+ * Encoder operating rate. This is used to work around the fact that vendors do not boost the
+ * hardware to maximum speed for the transcoding use case. This operating rate will be applied
+ * to the encoder inside MediaTranscoder. -1 means unavailable.
+ */
+ int encoderOperatingRate = -1;
}
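A small sketch of a caller supplying the new operating-rate hints through the NDK backend (the 240 fps value is an arbitrary example; -1, the default, leaves the hint unset):

    aidl::android::media::TranscodingVideoTrackFormat videoFormat;
    videoFormat.decoderOperatingRate = 240;  // hint for the decoder inside MediaTranscoder
    videoFormat.encoderOperatingRate = 240;  // hint for the encoder inside MediaTranscoder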
diff --git a/media/libmediatranscoding/include/media/ResourcePolicyInterface.h b/media/libmediatranscoding/include/media/ResourcePolicyInterface.h
new file mode 100644
index 0000000..8bd7d6b
--- /dev/null
+++ b/media/libmediatranscoding/include/media/ResourcePolicyInterface.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
+#define ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
+#include <memory>
+namespace android {
+
+class ResourcePolicyCallbackInterface;
+
+// Interface for the JobScheduler to control the resource status updates.
+class ResourcePolicyInterface {
+public:
+ // Set the associated callback interface used to send events when the resource
+ // status changes. (Setting it to nullptr stops the updates.)
+ virtual void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) = 0;
+
+protected:
+ virtual ~ResourcePolicyInterface() = default;
+};
+
+// Interface for notifying the JobScheduler of a change in resource status.
+class ResourcePolicyCallbackInterface {
+public:
+ // Called when codec resources become available. The scheduler may use this
+ // as a signal to attempt to restart transcoding jobs that were previously
+ // paused due to temporary resource loss.
+ virtual void onResourceAvailable() = 0;
+
+protected:
+ virtual ~ResourcePolicyCallbackInterface() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
diff --git a/media/libmediatranscoding/include/media/TranscoderWrapper.h b/media/libmediatranscoding/include/media/TranscoderWrapper.h
index a4c92c5..c956042 100644
--- a/media/libmediatranscoding/include/media/TranscoderWrapper.h
+++ b/media/libmediatranscoding/include/media/TranscoderWrapper.h
@@ -18,6 +18,7 @@
#define ANDROID_TRANSCODER_WRAPPER_H
#include <android-base/thread_annotations.h>
+#include <media/NdkMediaError.h>
#include <media/TranscoderInterface.h>
#include <list>
@@ -55,6 +56,7 @@
ClientIdType clientId;
JobIdType jobId;
std::function<void()> runnable;
+ int32_t arg;
};
using JobKeyType = std::pair<ClientIdType, JobIdType>;
@@ -68,26 +70,27 @@
ClientIdType mCurrentClientId;
JobIdType mCurrentJobId;
- static const char* toString(Event::Type type);
+ static std::string toString(const Event& event);
void onFinish(ClientIdType clientId, JobIdType jobId);
- void onError(ClientIdType clientId, JobIdType jobId, TranscodingErrorCode error);
+ void onError(ClientIdType clientId, JobIdType jobId, media_status_t status);
void onProgress(ClientIdType clientId, JobIdType jobId, int32_t progress);
- TranscodingErrorCode handleStart(ClientIdType clientId, JobIdType jobId,
- const TranscodingRequestParcel& request,
- const std::shared_ptr<ITranscodingClientCallback>& callback);
- TranscodingErrorCode handlePause(ClientIdType clientId, JobIdType jobId);
- TranscodingErrorCode handleResume(ClientIdType clientId, JobIdType jobId,
- const TranscodingRequestParcel& request,
- const std::shared_ptr<ITranscodingClientCallback>& callback);
- TranscodingErrorCode setupTranscoder(
- ClientIdType clientId, JobIdType jobId, const TranscodingRequestParcel& request,
- const std::shared_ptr<ITranscodingClientCallback>& callback,
- const std::shared_ptr<const Parcel>& pausedState = nullptr);
+ media_status_t handleStart(ClientIdType clientId, JobIdType jobId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback);
+ media_status_t handlePause(ClientIdType clientId, JobIdType jobId);
+ media_status_t handleResume(ClientIdType clientId, JobIdType jobId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback);
+ media_status_t setupTranscoder(ClientIdType clientId, JobIdType jobId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback,
+ const std::shared_ptr<const Parcel>& pausedState = nullptr);
void cleanup();
+ void reportError(ClientIdType clientId, JobIdType jobId, media_status_t err);
void queueEvent(Event::Type type, ClientIdType clientId, JobIdType jobId,
- const std::function<void()> runnable);
+ const std::function<void()> runnable, int32_t arg = 0);
void threadLoop();
};
diff --git a/media/libmediatranscoding/include/media/TranscodingClientManager.h b/media/libmediatranscoding/include/media/TranscodingClientManager.h
index a62ad8c..015a83a 100644
--- a/media/libmediatranscoding/include/media/TranscodingClientManager.h
+++ b/media/libmediatranscoding/include/media/TranscodingClientManager.h
@@ -58,16 +58,14 @@
* already been added, it will also return non-zero errorcode.
*
* @param callback client callback for the service to call this client.
- * @param pid client's process id.
- * @param uid client's user id.
* @param clientName client's name.
* @param opPackageName client's package name.
* @param client output holding the ITranscodingClient interface for the client
* to use for subsequent communications with the service.
* @return 0 if client is added successfully, non-zero errorcode otherwise.
*/
- status_t addClient(const std::shared_ptr<ITranscodingClientCallback>& callback, pid_t pid,
- uid_t uid, const std::string& clientName, const std::string& opPackageName,
+ status_t addClient(const std::shared_ptr<ITranscodingClientCallback>& callback,
+ const std::string& clientName, const std::string& opPackageName,
std::shared_ptr<ITranscodingClient>* client);
/**
diff --git a/media/libmediatranscoding/include/media/TranscodingJobScheduler.h b/media/libmediatranscoding/include/media/TranscodingJobScheduler.h
index 5ccadad..8f5e2aa 100644
--- a/media/libmediatranscoding/include/media/TranscodingJobScheduler.h
+++ b/media/libmediatranscoding/include/media/TranscodingJobScheduler.h
@@ -18,6 +18,7 @@
#define ANDROID_MEDIA_TRANSCODING_JOB_SCHEDULER_H
#include <aidl/android/media/TranscodingJobPriority.h>
+#include <media/ResourcePolicyInterface.h>
#include <media/SchedulerClientInterface.h>
#include <media/TranscoderInterface.h>
#include <media/TranscodingRequest.h>
@@ -34,7 +35,8 @@
class TranscodingJobScheduler : public UidPolicyCallbackInterface,
public SchedulerClientInterface,
- public TranscoderCallbackInterface {
+ public TranscoderCallbackInterface,
+ public ResourcePolicyCallbackInterface {
public:
virtual ~TranscodingJobScheduler();
@@ -58,9 +60,12 @@
// UidPolicyCallbackInterface
void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override;
- void onResourceAvailable() override;
// ~UidPolicyCallbackInterface
+ // ResourcePolicyCallbackInterface
+ void onResourceAvailable() override;
+ // ~ResourcePolicyCallbackInterface
+
private:
friend class MediaTranscodingService;
friend class TranscodingJobSchedulerTest;
@@ -96,13 +101,15 @@
std::shared_ptr<TranscoderInterface> mTranscoder;
std::shared_ptr<UidPolicyInterface> mUidPolicy;
+ std::shared_ptr<ResourcePolicyInterface> mResourcePolicy;
Job* mCurrentJob;
bool mResourceLost;
// Only allow MediaTranscodingService and unit tests to instantiate.
TranscodingJobScheduler(const std::shared_ptr<TranscoderInterface>& transcoder,
- const std::shared_ptr<UidPolicyInterface>& uidPolicy);
+ const std::shared_ptr<UidPolicyInterface>& uidPolicy,
+ const std::shared_ptr<ResourcePolicyInterface>& resourcePolicy);
Job* getTopJob_l();
void updateCurrentJob_l();
diff --git a/media/libmediatranscoding/include/media/TranscodingRequest.h b/media/libmediatranscoding/include/media/TranscodingRequest.h
index 63de1fb..a6cfed2 100644
--- a/media/libmediatranscoding/include/media/TranscodingRequest.h
+++ b/media/libmediatranscoding/include/media/TranscodingRequest.h
@@ -37,6 +37,8 @@
void setTo(const TranscodingRequestParcel& parcel) {
sourceFilePath = parcel.sourceFilePath;
destinationFilePath = parcel.destinationFilePath;
+ clientUid = parcel.clientUid;
+ clientPid = parcel.clientPid;
transcodingType = parcel.transcodingType;
requestedVideoTrackFormat = parcel.requestedVideoTrackFormat;
priority = parcel.priority;
diff --git a/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h b/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h
new file mode 100644
index 0000000..0836eda
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
+#define ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
+
+#include <android/binder_auto_utils.h>
+#include <media/ResourcePolicyInterface.h>
+#include <utils/Condition.h>
+
+#include <mutex>
+namespace aidl {
+namespace android {
+namespace media {
+class IResourceObserverService;
+}
+} // namespace android
+} // namespace aidl
+
+namespace android {
+
+using ::aidl::android::media::IResourceObserverService;
+
+class TranscodingResourcePolicy : public ResourcePolicyInterface {
+public:
+ explicit TranscodingResourcePolicy();
+ ~TranscodingResourcePolicy();
+
+ void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) override;
+
+private:
+ struct ResourceObserver;
+ mutable std::mutex mRegisteredLock;
+ bool mRegistered GUARDED_BY(mRegisteredLock);
+ std::shared_ptr<IResourceObserverService> mService GUARDED_BY(mRegisteredLock);
+ std::shared_ptr<ResourceObserver> mObserver;
+
+ mutable std::mutex mCallbackLock;
+ std::weak_ptr<ResourcePolicyCallbackInterface> mResourcePolicyCallback
+ GUARDED_BY(mCallbackLock);
+
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+
+ static void BinderDiedCallback(void* cookie);
+
+ void registerSelf();
+ void unregisterSelf();
+ void onResourceAvailable();
+}; // class TranscodingResourcePolicy
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
diff --git a/media/libmediatranscoding/include/media/TranscodingUidPolicy.h b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
index 27dadd2..8319eee 100644
--- a/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
+++ b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
@@ -53,10 +53,12 @@
void setUidObserverRegistered(bool registerd);
void registerSelf();
void unregisterSelf();
+ void setProcessInfoOverride();
int32_t getProcState_l(uid_t uid) NO_THREAD_SAFETY_ANALYSIS;
void updateTopUid_l() NO_THREAD_SAFETY_ANALYSIS;
struct UidObserver;
+ struct ResourceManagerClient;
mutable Mutex mUidLock;
std::shared_ptr<ActivityManager> mAm;
sp<UidObserver> mUidObserver;
@@ -65,6 +67,7 @@
std::unordered_map<uid_t, int32_t> mUidStateMap GUARDED_BY(mUidLock);
std::map<int32_t, std::unordered_set<uid_t>> mStateUidMap GUARDED_BY(mUidLock);
std::weak_ptr<UidPolicyCallbackInterface> mUidPolicyCallback;
+ std::shared_ptr<ResourceManagerClient> mProcInfoOverrideClient;
}; // class TranscodingUidPolicy
} // namespace android
diff --git a/media/libmediatranscoding/include/media/UidPolicyInterface.h b/media/libmediatranscoding/include/media/UidPolicyInterface.h
index dc28027..f88c1ed 100644
--- a/media/libmediatranscoding/include/media/UidPolicyInterface.h
+++ b/media/libmediatranscoding/include/media/UidPolicyInterface.h
@@ -41,19 +41,13 @@
virtual ~UidPolicyInterface() = default;
};
-// Interface for notifying the scheduler of a change in uid states or
-// transcoding resource availability.
+// Interface for notifying the scheduler of a change in uid states.
class UidPolicyCallbackInterface {
public:
// Called when the set of uids that's top priority among the uids of interest
// has changed. The receiver of this callback should adjust accordingly.
virtual void onTopUidsChanged(const std::unordered_set<uid_t>& uids) = 0;
- // Called when resources become available for transcoding use. The scheduler
- // may use this as a signal to attempt restart transcoding activity that
- // were previously paused due to temporary resource loss.
- virtual void onResourceAvailable() = 0;
-
protected:
virtual ~UidPolicyCallbackInterface() = default;
};
diff --git a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
index 1583325..41f3ada 100644
--- a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
@@ -43,12 +43,11 @@
using ::aidl::android::media::TranscodingRequestParcel;
using ::aidl::android::media::TranscodingResultParcel;
-constexpr pid_t kInvalidClientPid = -1;
+constexpr pid_t kInvalidClientPid = -5;
+constexpr pid_t kInvalidClientUid = -10;
constexpr const char* kInvalidClientName = "";
constexpr const char* kInvalidClientPackage = "";
-constexpr pid_t kClientPid = 2;
-constexpr uid_t kClientUid = 3;
constexpr const char* kClientName = "TestClientName";
constexpr const char* kClientPackage = "TestClientPackage";
@@ -236,17 +235,17 @@
~TranscodingClientManagerTest() { ALOGD("TranscodingClientManagerTest destroyed"); }
void addMultipleClients() {
- EXPECT_EQ(mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, kClientName,
+ EXPECT_EQ(mClientManager->addClient(mClientCallback1, kClientName,
kClientPackage, &mClient1),
OK);
EXPECT_NE(mClient1, nullptr);
- EXPECT_EQ(mClientManager->addClient(mClientCallback2, kClientPid, kClientUid, kClientName,
+ EXPECT_EQ(mClientManager->addClient(mClientCallback2, kClientName,
kClientPackage, &mClient2),
OK);
EXPECT_NE(mClient2, nullptr);
- EXPECT_EQ(mClientManager->addClient(mClientCallback3, kClientPid, kClientUid, kClientName,
+ EXPECT_EQ(mClientManager->addClient(mClientCallback3, kClientName,
kClientPackage, &mClient3),
OK);
EXPECT_NE(mClient3, nullptr);
@@ -274,23 +273,23 @@
TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientCallback) {
// Add a client with null callback and expect failure.
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(nullptr, kClientPid, kClientUid, kClientName,
+ status_t err = mClientManager->addClient(nullptr, kClientName,
kClientPackage, &client);
EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
-
-TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPid) {
- // Add a client with invalid Pid and expect failure.
- std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kInvalidClientPid, kClientUid,
- kClientName, kClientPackage, &client);
- EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
-}
+//
+//TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPid) {
+// // Add a client with invalid Pid and expect failure.
+// std::shared_ptr<ITranscodingClient> client;
+// status_t err = mClientManager->addClient(mClientCallback1,
+// kClientName, kClientPackage, &client);
+// EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
+//}
TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientName) {
// Add a client with invalid name and expect failure.
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid,
+ status_t err = mClientManager->addClient(mClientCallback1,
kInvalidClientName, kClientPackage, &client);
EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
@@ -298,7 +297,7 @@
TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPackageName) {
// Add a client with invalid packagename and expect failure.
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, kClientName,
+ status_t err = mClientManager->addClient(mClientCallback1, kClientName,
kInvalidClientPackage, &client);
EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
@@ -306,7 +305,7 @@
TEST_F(TranscodingClientManagerTest, TestAddingValidClient) {
// Add a valid client, should succeed.
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, kClientName,
+ status_t err = mClientManager->addClient(mClientCallback1, kClientName,
kClientPackage, &client);
EXPECT_EQ(err, OK);
EXPECT_NE(client.get(), nullptr);
@@ -320,14 +319,14 @@
TEST_F(TranscodingClientManagerTest, TestAddingDupliacteClient) {
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, kClientName,
+ status_t err = mClientManager->addClient(mClientCallback1, kClientName,
kClientPackage, &client);
EXPECT_EQ(err, OK);
EXPECT_NE(client.get(), nullptr);
EXPECT_EQ(mClientManager->getNumOfClients(), 1);
std::shared_ptr<ITranscodingClient> dupClient;
- err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, "dupClient",
+ err = mClientManager->addClient(mClientCallback1, "dupClient",
"dupPackage", &dupClient);
EXPECT_EQ(err, IMediaTranscodingService::ERROR_ALREADY_EXISTS);
EXPECT_EQ(dupClient.get(), nullptr);
@@ -337,8 +336,7 @@
EXPECT_TRUE(status.isOk());
EXPECT_EQ(mClientManager->getNumOfClients(), 0);
- err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, "dupClient",
- "dupPackage", &dupClient);
+ err = mClientManager->addClient(mClientCallback1, "dupClient", "dupPackage", &dupClient);
EXPECT_EQ(err, OK);
EXPECT_NE(dupClient.get(), nullptr);
EXPECT_EQ(mClientManager->getNumOfClients(), 1);
@@ -385,6 +383,14 @@
EXPECT_TRUE(mClient1->submitRequest(badRequest, &job, &result).isOk());
EXPECT_FALSE(result);
+ // Test submit with bad pid/uid.
+ badRequest.sourceFilePath = "test_source_file_3";
+ badRequest.destinationFilePath = "test_destination_file_3";
+ badRequest.clientPid = kInvalidClientPid;
+ badRequest.clientUid = kInvalidClientUid;
+ EXPECT_TRUE(mClient1->submitRequest(badRequest, &job, &result).isOk());
+ EXPECT_FALSE(result);
+
// Test get jobs by id.
EXPECT_TRUE(mClient1->getJobWithId(JOB(2), &job, &result).isOk());
EXPECT_EQ(job.jobId, JOB(2));
@@ -468,7 +474,7 @@
TEST_F(TranscodingClientManagerTest, TestUseAfterUnregister) {
// Add a client.
std::shared_ptr<ITranscodingClient> client;
- status_t err = mClientManager->addClient(mClientCallback1, kClientPid, kClientUid, kClientName,
+ status_t err = mClientManager->addClient(mClientCallback1, kClientName,
kClientPackage, &client);
EXPECT_EQ(err, OK);
EXPECT_NE(client.get(), nullptr);
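With pid/uid dropped from addClient(), the updated tests rely on the service rejecting bad ids carried in the request itself (kInvalidClientPid = -5, kInvalidClientUid = -10). A standalone sketch of that kind of check, using a hypothetical helper name, could be:

#include <cstdint>
#include <sys/types.h>

// Hypothetical helper, not part of the patch: negative sentinel ids such as
// -5 / -10 fail this check, matching the submitRequest() expectations above.
static bool isValidClientIds(pid_t pid, uid_t uid) {
    return pid >= 0 && static_cast<int32_t>(uid) >= 0;
}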
diff --git a/media/libmediatranscoding/tests/TranscodingJobScheduler_tests.cpp b/media/libmediatranscoding/tests/TranscodingJobScheduler_tests.cpp
index d21b595..9b9df87 100644
--- a/media/libmediatranscoding/tests/TranscodingJobScheduler_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingJobScheduler_tests.cpp
@@ -213,7 +213,8 @@
ALOGI("TranscodingJobSchedulerTest set up");
mTranscoder.reset(new TestTranscoder());
mUidPolicy.reset(new TestUidPolicy());
- mScheduler.reset(new TranscodingJobScheduler(mTranscoder, mUidPolicy));
+ mScheduler.reset(
+ new TranscodingJobScheduler(mTranscoder, mUidPolicy, nullptr /*resourcePolicy*/));
mUidPolicy->setCallback(mScheduler);
// Set priority only, ignore other fields for now.
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index e850d66..f35ea99 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -284,7 +284,7 @@
format = std::shared_ptr<AMediaFormat>(mergedFormat, &AMediaFormat_delete);
}
- transcoder->configure(mSampleReader, trackIndex, format);
+ status = transcoder->configure(mSampleReader, trackIndex, format);
if (status != AMEDIA_OK) {
LOG(ERROR) << "Configure track transcoder for track #" << trackIndex << " returned error "
<< status;
diff --git a/media/libshmem/Android.bp b/media/libshmem/Android.bp
index ee33f9e..fae98ed 100644
--- a/media/libshmem/Android.bp
+++ b/media/libshmem/Android.bp
@@ -15,12 +15,12 @@
"libbinder",
"libshmemutil",
"libutils",
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
export_shared_lib_headers: [
"libbinder",
"libutils",
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
}
@@ -29,10 +29,10 @@
export_include_dirs: ["include"],
srcs: ["ShmemUtil.cpp"],
shared_libs: [
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
export_shared_lib_headers: [
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
}
@@ -44,7 +44,7 @@
"libshmemcompat",
"libshmemutil",
"libutils",
- "shared-file-region-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
],
test_suites: ["device-tests"],
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 382491e..d2f347c 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2238,6 +2238,12 @@
}
err = setupG711Codec(encoder, sampleRate, numChannels);
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_OPUS)) {
+ int32_t numChannels = 1, sampleRate = 48000;
+ if (msg->findInt32("channel-count", &numChannels) &&
+ msg->findInt32("sample-rate", &sampleRate)) {
+ err = setupOpusCodec(encoder, sampleRate, numChannels);
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
// numChannels needs to be set to properly communicate PCM values.
int32_t numChannels = 2, sampleRate = 44100, compressionLevel = -1;
@@ -3117,6 +3123,26 @@
kPortIndexInput, sampleRate, numChannels);
}
+status_t ACodec::setupOpusCodec(bool encoder, int32_t sampleRate, int32_t numChannels) {
+ if (encoder) {
+ return INVALID_OPERATION;
+ }
+ OMX_AUDIO_PARAM_ANDROID_OPUSTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexInput;
+ status_t err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus, &def, sizeof(def));
+ if (err != OK) {
+ ALOGE("setupOpusCodec(): Error %d getting OMX_IndexParamAudioAndroidOpus parameter", err);
+ return err;
+ }
+ def.nSampleRate = sampleRate;
+ def.nChannels = numChannels;
+ err = mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus, &def, sizeof(def));
+ return err;
+}
+
status_t ACodec::setupFlacCodec(
bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel,
AudioEncoding encoding) {
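The new setupOpusCodec() path is driven by the "channel-count" and "sample-rate" entries of the configuration message. As illustrative usage only (not part of the patch), an NDK client would typically supply those keys like this; codec-specific data (csd buffers) needed by real Opus streams is omitted for brevity.

#include <cstdint>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

// Sketch: configure a software Opus decoder. The two integer keys below end
// up as the "sample-rate" / "channel-count" entries that ACodec reads.
static AMediaCodec* configureOpusDecoder(int32_t sampleRate, int32_t channelCount) {
    AMediaFormat* format = AMediaFormat_new();
    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "audio/opus");
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, sampleRate);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, channelCount);

    AMediaCodec* codec = AMediaCodec_createDecoderByType("audio/opus");
    AMediaCodec_configure(codec, format, nullptr /*surface*/, nullptr /*crypto*/, 0 /*flags*/);
    AMediaFormat_delete(format);
    return codec;
}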
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index c180edf..16977d7 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -211,7 +211,7 @@
],
static_libs: [
- "librenderengine",
+ "librenderfright",
],
export_include_dirs: [
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5015787..86372e3 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1009,6 +1009,12 @@
return err;
}
+void MediaCodec::PostReplyWithError(const sp<AMessage> &msg, int32_t err) {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ PostReplyWithError(replyID, err);
+}
+
void MediaCodec::PostReplyWithError(const sp<AReplyToken> &replyID, int32_t err) {
int32_t finalErr = err;
if (mReleasedByResourceManager) {
@@ -1512,7 +1518,6 @@
mStickyError = OK;
// reset state not reset by setState(UNINITIALIZED)
- mReplyID = 0;
mDequeueInputReplyID = 0;
mDequeueOutputReplyID = 0;
mDequeueInputTimeoutGeneration = 0;
@@ -2165,7 +2170,7 @@
if (mState == RELEASING) {
mComponentName.clear();
}
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
sendErrorResponse = false;
}
break;
@@ -2191,7 +2196,7 @@
case FLUSHED:
case STARTED:
{
- sendErrorResponse = false;
+ sendErrorResponse = (mReplyID != nullptr);
setStickyError(err);
postActivityNotificationIfPossible();
@@ -2221,7 +2226,7 @@
default:
{
- sendErrorResponse = false;
+ sendErrorResponse = (mReplyID != nullptr);
setStickyError(err);
postActivityNotificationIfPossible();
@@ -2248,7 +2253,15 @@
}
if (sendErrorResponse) {
- PostReplyWithError(mReplyID, err);
+ // TRICKY: replicate PostReplyWithError logic for
+ // err code override
+ int32_t finalErr = err;
+ if (mReleasedByResourceManager) {
+ // override the err code if MediaCodec has been
+ // released by ResourceManager.
+ finalErr = DEAD_OBJECT;
+ }
+ postPendingRepliesAndDeferredMessages(finalErr);
}
break;
}
@@ -2296,7 +2309,7 @@
MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
}
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
break;
}
@@ -2335,7 +2348,7 @@
mFlags |= kFlagUsesSoftwareRenderer;
}
setState(CONFIGURED);
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
// augment our media metrics info, now that we know more things
// such as what the codec extracted from any CSD passed in.
@@ -2380,6 +2393,12 @@
case kWhatInputSurfaceCreated:
{
+ if (mState != CONFIGURED) {
+ // state transitioned unexpectedly; we should have replied already.
+ ALOGD("received kWhatInputSurfaceCreated message in state %s",
+ stateString(mState).c_str());
+ break;
+ }
// response to initiateCreateInputSurface()
status_t err = NO_ERROR;
sp<AMessage> response = new AMessage;
@@ -2398,12 +2417,18 @@
} else {
response->setInt32("err", err);
}
- response->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages(response);
break;
}
case kWhatInputSurfaceAccepted:
{
+ if (mState != CONFIGURED) {
+ // state transitioned unexpectedly; we should have replied already.
+ ALOGD("received kWhatInputSurfaceAccepted message in state %s",
+ stateString(mState).c_str());
+ break;
+ }
// response to initiateSetInputSurface()
status_t err = NO_ERROR;
sp<AMessage> response = new AMessage();
@@ -2414,19 +2439,25 @@
} else {
response->setInt32("err", err);
}
- response->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages(response);
break;
}
case kWhatSignaledInputEOS:
{
+ if (!isExecuting()) {
+ // state transitioned unexpectedly; we should have replied already.
+ ALOGD("received kWhatSignaledInputEOS message in state %s",
+ stateString(mState).c_str());
+ break;
+ }
// response to signalEndOfInputStream()
sp<AMessage> response = new AMessage;
status_t err;
if (msg->findInt32("err", &err)) {
response->setInt32("err", err);
}
- response->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages(response);
break;
}
@@ -2446,7 +2477,7 @@
MediaResource::GraphicMemoryResource(getGraphicBufferSize()));
}
setState(STARTED);
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
break;
}
@@ -2583,7 +2614,7 @@
break;
}
setState(INITIALIZED);
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
break;
}
@@ -2608,7 +2639,7 @@
mReleaseSurface.reset();
if (mReplyID != nullptr) {
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
}
if (mAsyncReleaseCompleteNotification != nullptr) {
flushMediametrics();
@@ -2633,7 +2664,7 @@
mCodec->signalResume();
}
- (new AMessage)->postReply(mReplyID);
+ postPendingRepliesAndDeferredMessages();
break;
}
@@ -2645,14 +2676,18 @@
case kWhatInit:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
if (mState != UNINITIALIZED) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
mReplyID = replyID;
setState(INITIALIZING);
@@ -2714,14 +2749,18 @@
case kWhatConfigure:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
if (mState != INITIALIZED) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
sp<RefBase> obj;
CHECK(msg->findObject("surface", &obj));
@@ -2859,15 +2898,19 @@
case kWhatCreateInputSurface:
case kWhatSetInputSurface:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
// Must be configured, but can't have been started yet.
if (mState != CONFIGURED) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
mReplyID = replyID;
if (msg->what() == kWhatCreateInputSurface) {
mCodec->initiateCreateInputSurface();
@@ -2882,9 +2925,6 @@
}
case kWhatStart:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
if (mState == FLUSHED) {
setState(STARTED);
if (mHavePendingInputBuffers) {
@@ -2892,13 +2932,20 @@
mHavePendingInputBuffers = false;
}
mCodec->signalResume();
- PostReplyWithError(replyID, OK);
+ PostReplyWithError(msg, OK);
break;
} else if (mState != CONFIGURED) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
mReplyID = replyID;
setState(STARTING);
@@ -2906,15 +2953,42 @@
break;
}
- case kWhatStop:
+ case kWhatStop: {
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ [[fallthrough]];
+ }
case kWhatRelease:
{
State targetState =
(msg->what() == kWhatStop) ? INITIALIZED : UNINITIALIZED;
+ if ((mState == RELEASING && targetState == UNINITIALIZED)
+ || (mState == STOPPING && targetState == INITIALIZED)) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
+ sp<AMessage> asyncNotify;
+ (void)msg->findMessage("async", &asyncNotify);
+ // post asyncNotify if going out of scope.
+ struct AsyncNotifyPost {
+ AsyncNotifyPost(const sp<AMessage> &asyncNotify) : mAsyncNotify(asyncNotify) {}
+ ~AsyncNotifyPost() {
+ if (mAsyncNotify) {
+ mAsyncNotify->post();
+ }
+ }
+ void clear() { mAsyncNotify.clear(); }
+ private:
+ sp<AMessage> mAsyncNotify;
+ } asyncNotifyPost{asyncNotify};
+
// already stopped/released
if (mState == UNINITIALIZED && mReleasedByResourceManager) {
sp<AMessage> response = new AMessage;
@@ -2926,7 +3000,13 @@
int32_t reclaimed = 0;
msg->findInt32("reclaimed", &reclaimed);
if (reclaimed) {
- mReleasedByResourceManager = true;
+ if (!mReleasedByResourceManager) {
+ // notify the async client
+ if (mFlags & kFlagIsAsync) {
+ onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+ }
+ mReleasedByResourceManager = true;
+ }
int32_t force = 0;
msg->findInt32("force", &force);
@@ -2938,10 +3018,6 @@
response->setInt32("err", WOULD_BLOCK);
response->postReply(replyID);
- // notify the async client
- if (mFlags & kFlagIsAsync) {
- onError(DEAD_OBJECT, ACTION_CODE_FATAL);
- }
break;
}
}
@@ -2978,12 +3054,14 @@
// after this, and we'll no longer be able to reply.
if (mState == FLUSHING || mState == STOPPING
|| mState == CONFIGURING || mState == STARTING) {
- (new AMessage)->postReply(mReplyID);
+ // mReplyID is always set in these states.
+ postPendingRepliesAndDeferredMessages();
}
if (mFlags & kFlagSawMediaServerDie) {
// It's dead, Jim. Don't expect initiateShutdown to yield
// any useful results now...
+ // Any pending reply would have been handled at kWhatError.
setState(UNINITIALIZED);
if (targetState == UNINITIALIZED) {
mComponentName.clear();
@@ -2997,12 +3075,12 @@
// reply now with an error to unblock the client, client can
// release after the failure (instead of ANR).
if (msg->what() == kWhatStop && (mFlags & kFlagStickyError)) {
+ // Any pending reply would have been handled at kWhatError.
PostReplyWithError(replyID, getStickyError());
break;
}
- sp<AMessage> asyncNotify;
- if (msg->findMessage("async", &asyncNotify) && asyncNotify != nullptr) {
+ if (asyncNotify != nullptr) {
if (mSurface != NULL) {
if (!mReleaseSurface) {
mReleaseSurface.reset(new ReleaseSurface);
@@ -3022,6 +3100,12 @@
}
}
+ if (mReplyID) {
+ // State transition replies are handled above, so this reply
+ // would not be related to state transition. As we are
+ // shutting down the component, just fail the operation.
+ postPendingRepliesAndDeferredMessages(UNKNOWN_ERROR);
+ }
mReplyID = replyID;
setState(msg->what() == kWhatStop ? STOPPING : RELEASING);
@@ -3036,8 +3120,8 @@
if (asyncNotify != nullptr) {
mResourceManagerProxy->markClientForPendingRemoval();
- (new AMessage)->postReply(mReplyID);
- mReplyID = 0;
+ postPendingRepliesAndDeferredMessages();
+ asyncNotifyPost.clear();
mAsyncReleaseCompleteNotification = asyncNotify;
}
@@ -3208,17 +3292,21 @@
case kWhatSignalEndOfInputStream:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
if (!isExecuting() || !mHaveInputSurface) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
} else if (mFlags & kFlagStickyError) {
- PostReplyWithError(replyID, getStickyError());
+ PostReplyWithError(msg, getStickyError());
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
mReplyID = replyID;
mCodec->signalEndOfInputStream();
break;
@@ -3260,17 +3348,21 @@
case kWhatFlush:
{
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
if (!isExecuting()) {
- PostReplyWithError(replyID, INVALID_OPERATION);
+ PostReplyWithError(msg, INVALID_OPERATION);
break;
} else if (mFlags & kFlagStickyError) {
- PostReplyWithError(replyID, getStickyError());
+ PostReplyWithError(msg, getStickyError());
break;
}
+ if (mReplyID) {
+ mDeferredMessages.push_back(msg);
+ break;
+ }
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
mReplyID = replyID;
// TODO: skip flushing if already FLUSHED
setState(FLUSHING);
@@ -4215,6 +4307,26 @@
return OK;
}
+void MediaCodec::postPendingRepliesAndDeferredMessages(status_t err /* = OK */) {
+ sp<AMessage> response{new AMessage};
+ if (err != OK) {
+ response->setInt32("err", err);
+ }
+ postPendingRepliesAndDeferredMessages(response);
+}
+
+void MediaCodec::postPendingRepliesAndDeferredMessages(const sp<AMessage> &response) {
+ CHECK(mReplyID);
+ response->postReply(mReplyID);
+ mReplyID.clear();
+ ALOGV_IF(!mDeferredMessages.empty(),
+ "posting %zu deferred messages", mDeferredMessages.size());
+ for (sp<AMessage> msg : mDeferredMessages) {
+ msg->post();
+ }
+ mDeferredMessages.clear();
+}
+
std::string MediaCodec::stateString(State state) {
const char *rval = NULL;
char rawbuffer[16]; // room for "%d"
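The MediaCodec changes above replace scattered (new AMessage)->postReply(mReplyID) calls with a single pending-reply token plus a queue of deferred request messages, replayed by postPendingRepliesAndDeferredMessages(). A minimal standalone sketch of that defer-and-replay pattern, with generic names rather than MediaCodec's actual types, is:

#include <functional>
#include <optional>
#include <utility>
#include <vector>

// Sketch: one in-flight request at a time; later requests are deferred and
// replayed once the pending reply has been posted.
class PendingReplyQueue {
public:
    // Returns false if a reply is already pending; the caller should defer.
    bool tryBegin(std::function<void(int /*err*/)> reply) {
        if (mPendingReply) return false;
        mPendingReply = std::move(reply);
        return true;
    }

    void defer(std::function<void()> request) { mDeferred.push_back(std::move(request)); }

    void postPendingReplyAndDeferredRequests(int err = 0) {
        if (mPendingReply) {
            (*mPendingReply)(err);
            mPendingReply.reset();
        }
        auto deferred = std::move(mDeferred);
        mDeferred.clear();
        for (auto& request : deferred) request();  // may re-enter tryBegin()
    }

private:
    std::optional<std::function<void(int)>> mPendingReply;
    std::vector<std::function<void()>> mDeferred;
};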
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 8bbffd4..c91386d 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -92,7 +92,9 @@
}
sp<MetaData> trackMeta = new MetaData;
- convertMessageToMetaData(format, trackMeta);
+ if (convertMessageToMetaData(format, trackMeta) != OK) {
+ return BAD_VALUE;
+ }
sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta);
status_t result = mWriter->addSource(newTrack);
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 85ff474..1f569ef 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -132,37 +132,47 @@
}
void setNativeWindowHdrMetadata(ANativeWindow *nativeWindow, HDRStaticInfo *info) {
- struct android_smpte2086_metadata smpte2086_meta = {
- .displayPrimaryRed = {
- info->sType1.mR.x * 0.00002f,
- info->sType1.mR.y * 0.00002f
- },
- .displayPrimaryGreen = {
- info->sType1.mG.x * 0.00002f,
- info->sType1.mG.y * 0.00002f
- },
- .displayPrimaryBlue = {
- info->sType1.mB.x * 0.00002f,
- info->sType1.mB.y * 0.00002f
- },
- .whitePoint = {
- info->sType1.mW.x * 0.00002f,
- info->sType1.mW.y * 0.00002f
- },
- .maxLuminance = (float) info->sType1.mMaxDisplayLuminance,
- .minLuminance = info->sType1.mMinDisplayLuminance * 0.0001f
- };
+ // If the mastering max and min luminance fields are 0, do not use them.
+ // It indicates the value may not be present in the stream.
+ if ((float)info->sType1.mMaxDisplayLuminance > 0.0f &&
+ (info->sType1.mMinDisplayLuminance * 0.0001f) > 0.0f) {
+ struct android_smpte2086_metadata smpte2086_meta = {
+ .displayPrimaryRed = {
+ info->sType1.mR.x * 0.00002f,
+ info->sType1.mR.y * 0.00002f
+ },
+ .displayPrimaryGreen = {
+ info->sType1.mG.x * 0.00002f,
+ info->sType1.mG.y * 0.00002f
+ },
+ .displayPrimaryBlue = {
+ info->sType1.mB.x * 0.00002f,
+ info->sType1.mB.y * 0.00002f
+ },
+ .whitePoint = {
+ info->sType1.mW.x * 0.00002f,
+ info->sType1.mW.y * 0.00002f
+ },
+ .maxLuminance = (float) info->sType1.mMaxDisplayLuminance,
+ .minLuminance = info->sType1.mMinDisplayLuminance * 0.0001f
+ };
- int err = native_window_set_buffers_smpte2086_metadata(nativeWindow, &smpte2086_meta);
- ALOGW_IF(err != 0, "failed to set smpte2086 metadata on surface (%d)", err);
+ int err = native_window_set_buffers_smpte2086_metadata(nativeWindow, &smpte2086_meta);
+ ALOGW_IF(err != 0, "failed to set smpte2086 metadata on surface (%d)", err);
+ }
- struct android_cta861_3_metadata cta861_meta = {
- .maxContentLightLevel = (float) info->sType1.mMaxContentLightLevel,
- .maxFrameAverageLightLevel = (float) info->sType1.mMaxFrameAverageLightLevel
- };
+ // If the content light level fields are 0, do not use them; it
+ // indicates the value may not be present in the stream.
+ if ((float)info->sType1.mMaxContentLightLevel > 0.0f &&
+ (float)info->sType1.mMaxFrameAverageLightLevel > 0.0f) {
+ struct android_cta861_3_metadata cta861_meta = {
+ .maxContentLightLevel = (float) info->sType1.mMaxContentLightLevel,
+ .maxFrameAverageLightLevel = (float) info->sType1.mMaxFrameAverageLightLevel
+ };
- err = native_window_set_buffers_cta861_3_metadata(nativeWindow, &cta861_meta);
- ALOGW_IF(err != 0, "failed to set cta861_3 metadata on surface (%d)", err);
+ int err = native_window_set_buffers_cta861_3_metadata(nativeWindow, &cta861_meta);
+ ALOGW_IF(err != 0, "failed to set cta861_3 metadata on surface (%d)", err);
+ }
}
status_t setNativeWindowRotation(
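The gating above treats zero mastering-luminance and zero content-light-level values as "not present in the stream" and skips the corresponding surface metadata. The same rule in isolation, with a hypothetical struct standing in for the Type1 fields of HDRStaticInfo:

#include <cstdint>

// Hypothetical stand-in for the HDRStaticInfo::sType1 fields used above.
struct HdrType1 {
    uint16_t maxDisplayLuminance;        // cd/m^2
    uint16_t minDisplayLuminance;        // 0.0001 cd/m^2 units
    uint16_t maxContentLightLevel;       // cd/m^2
    uint16_t maxFrameAverageLightLevel;  // cd/m^2
};

// Zero means the stream did not carry the value; do not forward it.
static bool hasMasteringLuminance(const HdrType1& info) {
    return info.maxDisplayLuminance > 0 && info.minDisplayLuminance > 0;
}

static bool hasContentLightLevel(const HdrType1& info) {
    return info.maxContentLightLevel > 0 && info.maxFrameAverageLightLevel > 0;
}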
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 3dceef7..a95829c 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -1,12 +1,17 @@
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "HEVCUtilsUnitTest" },
- //{ "name": "ExtractorFactoryTest" },
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
// writerTest fails about 5 out of 66
- // in addition to needing the download ability
- //{ "name": "writerTest" },
+ // { "name": "writerTest" },
+ { "name": "HEVCUtilsUnitTest" },
+ { "name": "ExtractorFactoryTest" }
+
+ ],
+
+ "presubmit": [
{
"name": "CtsMediaTestCases",
"options": [
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index d67874f..6446857 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -769,6 +769,7 @@
{ "sei", kKeySEI },
{ "text-format-data", kKeyTextFormatData },
{ "thumbnail-csd-hevc", kKeyThumbnailHVCC },
+ { "slow-motion-markers", kKeySlowMotionMarkers },
}
};
@@ -1663,13 +1664,16 @@
meta->setInt32(kKeyColorMatrix, colorAspects.mMatrixCoeffs);
}
}
-
-void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
+/* Converts key and value pairs in AMessage format to MetaData format.
+ * Also checks for the presence of required keys.
+ */
+status_t convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
AString mime;
if (msg->findString("mime", &mime)) {
meta->setCString(kKeyMIMEType, mime.c_str());
} else {
- ALOGW("did not find mime type");
+ ALOGE("did not find mime type");
+ return BAD_VALUE;
}
convertMessageToMetaDataFromMappings(msg, meta);
@@ -1718,7 +1722,8 @@
meta->setInt32(kKeyWidth, width);
meta->setInt32(kKeyHeight, height);
} else {
- ALOGV("did not find width and/or height");
+ ALOGE("did not find width and/or height");
+ return BAD_VALUE;
}
int32_t sarWidth, sarHeight;
@@ -1803,14 +1808,14 @@
}
}
} else if (mime.startsWith("audio/")) {
- int32_t numChannels;
- if (msg->findInt32("channel-count", &numChannels)) {
- meta->setInt32(kKeyChannelCount, numChannels);
+ int32_t numChannels, sampleRate;
+ if (!msg->findInt32("channel-count", &numChannels) ||
+ !msg->findInt32("sample-rate", &sampleRate)) {
+ ALOGE("did not find channel-count and/or sample-rate");
+ return BAD_VALUE;
}
- int32_t sampleRate;
- if (msg->findInt32("sample-rate", &sampleRate)) {
- meta->setInt32(kKeySampleRate, sampleRate);
- }
+ meta->setInt32(kKeyChannelCount, numChannels);
+ meta->setInt32(kKeySampleRate, sampleRate);
int32_t bitsPerSample;
if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
meta->setInt32(kKeyBitsPerSample, bitsPerSample);
@@ -1925,7 +1930,8 @@
}
}
} else {
- ALOGW("We need csd-2!!. %s", msg->debugString().c_str());
+ ALOGE("We need csd-2!!. %s", msg->debugString().c_str());
+ return BAD_VALUE;
}
} else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
@@ -1991,6 +1997,7 @@
ALOGI("converted %s to:", msg->debugString(0).c_str());
meta->dumpToLog();
#endif
+ return OK;
}
status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
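Since convertMessageToMetaData() now returns a status, callers are expected to bail out when required keys (mime, width/height, or channel-count/sample-rate) are missing, as the MediaMuxer change above does. A sketch of the caller-side pattern; makeTrackMeta is a hypothetical wrapper, not an API from this patch.

#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/foundation/AMessage.h>

namespace android {

// Illustrative caller: propagate BAD_VALUE instead of continuing with an
// incomplete track format.
static status_t makeTrackMeta(const sp<AMessage>& format, sp<MetaData>* outMeta) {
    sp<MetaData> meta = new MetaData;
    status_t err = convertMessageToMetaData(format, meta);
    if (err != OK) {
        return err;  // e.g. missing "mime", "width"/"height", or audio rate/channels
    }
    *outMeta = meta;
    return OK;
}

}  // namespace android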
diff --git a/media/libstagefright/codecs/amrnb/TEST_MAPPING b/media/libstagefright/codecs/amrnb/TEST_MAPPING
index 2909099..343d08a 100644
--- a/media/libstagefright/codecs/amrnb/TEST_MAPPING
+++ b/media/libstagefright/codecs/amrnb/TEST_MAPPING
@@ -1,11 +1,10 @@
// mappings for frameworks/av/media/libstagefright/codecs/amrnb
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "AmrnbDecoderTest"},
-
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "AmrnbEncoderTest"}
-
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrnbDecoderTest"},
+ { "name": "AmrnbEncoderTest"}
]
}
diff --git a/media/libstagefright/codecs/amrwb/TEST_MAPPING b/media/libstagefright/codecs/amrwb/TEST_MAPPING
index 3d58ba2..0278d26 100644
--- a/media/libstagefright/codecs/amrwb/TEST_MAPPING
+++ b/media/libstagefright/codecs/amrwb/TEST_MAPPING
@@ -1,8 +1,10 @@
// mappings for frameworks/av/media/libstagefright/codecs/amrwb
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "AmrwbDecoderTest"}
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrwbDecoderTest"}
]
}
diff --git a/media/libstagefright/codecs/amrwb/test/Android.bp b/media/libstagefright/codecs/amrwb/test/Android.bp
index 968215a..e8a2aa9 100644
--- a/media/libstagefright/codecs/amrwb/test/Android.bp
+++ b/media/libstagefright/codecs/amrwb/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "AmrwbDecoderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/codecs/amrwbenc/TEST_MAPPING b/media/libstagefright/codecs/amrwbenc/TEST_MAPPING
index d53d665..045e8b3 100644
--- a/media/libstagefright/codecs/amrwbenc/TEST_MAPPING
+++ b/media/libstagefright/codecs/amrwbenc/TEST_MAPPING
@@ -1,8 +1,10 @@
// mappings for frameworks/av/media/libstagefright/codecs/amrwbenc
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "AmrwbEncoderTest"}
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrwbEncoderTest"}
]
}
diff --git a/media/libstagefright/codecs/amrwbenc/test/Android.bp b/media/libstagefright/codecs/amrwbenc/test/Android.bp
index 7042bc5..0872570 100644
--- a/media/libstagefright/codecs/amrwbenc/test/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "AmrwbEncoderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/codecs/m4v_h263/TEST_MAPPING b/media/libstagefright/codecs/m4v_h263/TEST_MAPPING
index 6b42847..ba3ff1c 100644
--- a/media/libstagefright/codecs/m4v_h263/TEST_MAPPING
+++ b/media/libstagefright/codecs/m4v_h263/TEST_MAPPING
@@ -1,7 +1,9 @@
// mappings for frameworks/av/media/libstagefright/codecs/m4v_h263
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
// the decoder reports something bad about an unexpected newline in the *config file
// and the config file looks like the AndroidTest.xml file that we put in there.
@@ -9,8 +11,8 @@
// between decode and encode AndroidTest.xml files -- except that encode does NOT
// finish with a newline.
// strange.
- // { "name": "Mpeg4H263DecoderTest"},
- // { "name": "Mpeg4H263EncoderTest"}
+ { "name": "Mpeg4H263DecoderTest"},
+ { "name": "Mpeg4H263EncoderTest"}
]
}
diff --git a/media/libstagefright/codecs/mp3dec/TEST_MAPPING b/media/libstagefright/codecs/mp3dec/TEST_MAPPING
index b237d65..4ef4317 100644
--- a/media/libstagefright/codecs/mp3dec/TEST_MAPPING
+++ b/media/libstagefright/codecs/mp3dec/TEST_MAPPING
@@ -1,7 +1,9 @@
// mappings for frameworks/av/media/libstagefright/codecs/mp3dec
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- // { "name": "Mp3DecoderTest"}
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "Mp3DecoderTest"}
]
}
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_6.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_6.cpp
index 1f8018a..c306873 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_6.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_dct_6.cpp
@@ -111,6 +111,7 @@
; FUNCTION CODE
----------------------------------------------------------------------------*/
+__attribute__((no_sanitize("integer")))
void pvmp3_dct_6(int32 vec[])
{
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_6.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_6.cpp
index 8d80e8f..1ba080d 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_6.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_mdct_6.cpp
@@ -118,7 +118,7 @@
; FUNCTION CODE
----------------------------------------------------------------------------*/
-
+__attribute__((no_sanitize("integer")))
void pvmp3_mdct_6(int32 vec[], int32 *history)
{
int32 i;
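The attribute added to these two fixed-point routines tells Clang's integer sanitizer to skip them, since wraparound there is intentional. A minimal standalone example of the same annotation:

#include <cstdint>

// Clang-only attribute: integer-overflow sanitizing is disabled for this one
// function, where wraparound is intentional; the rest of the file stays
// instrumented.
__attribute__((no_sanitize("integer")))
static int32_t fixed_point_mac(int32_t acc, int32_t a, int32_t b) {
    return acc + static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 15);
}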
diff --git a/media/libstagefright/codecs/mp3dec/test/AndroidTest.xml b/media/libstagefright/codecs/mp3dec/test/AndroidTest.xml
index 7ff9732..233f9bb 100644
--- a/media/libstagefright/codecs/mp3dec/test/AndroidTest.xml
+++ b/media/libstagefright/codecs/mp3dec/test/AndroidTest.xml
@@ -19,7 +19,7 @@
<option name="cleanup" value="true" />
<option name="push" value="Mp3DecoderTest->/data/local/tmp/Mp3DecoderTest" />
<option name="push-file"
- key="https://storage.googleapis.com/android_media/frameworks/av/media/libstagefright/mp3dec/test/Mp3DecoderTest.zip?unzip=true"
+ key="https://storage.googleapis.com/android_media/frameworks/av/media/libstagefright/mp3dec/test/Mp3DecoderTest-1.1.zip?unzip=true"
value="/data/local/tmp/Mp3DecoderTestRes/" />
</target_preparer>
diff --git a/media/libstagefright/codecs/mp3dec/test/Mp3DecoderTest.cpp b/media/libstagefright/codecs/mp3dec/test/Mp3DecoderTest.cpp
index 99553ec..0784c0c 100644
--- a/media/libstagefright/codecs/mp3dec/test/Mp3DecoderTest.cpp
+++ b/media/libstagefright/codecs/mp3dec/test/Mp3DecoderTest.cpp
@@ -185,6 +185,7 @@
INSTANTIATE_TEST_SUITE_P(Mp3DecoderTestAll, Mp3DecoderTest,
::testing::Values(("bbb_44100hz_2ch_128kbps_mp3_30sec.mp3"),
("bbb_44100hz_2ch_128kbps_mp3_5mins.mp3"),
+ ("bug_136053885.mp3"),
("bbb_mp3_stereo_192kbps_48000hz.mp3")));
int main(int argc, char **argv) {
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 4f61aa8..5bb1879 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -58,6 +58,8 @@
mInputBufferCount(0),
mDecoder(NULL),
mHeader(NULL),
+ mNumChannels(1),
+ mSamplingRate(kRate),
mCodecDelay(0),
mSeekPreRoll(0),
mAnchorTimeUs(0),
@@ -169,11 +171,11 @@
}
opusParams->nAudioBandWidth = 0;
- opusParams->nSampleRate = kRate;
+ opusParams->nSampleRate = mSamplingRate;
opusParams->nBitRate = 0;
if (!isConfigured()) {
- opusParams->nChannels = 1;
+ opusParams->nChannels = mNumChannels;
} else {
opusParams->nChannels = mHeader->channels;
}
@@ -274,7 +276,8 @@
if (opusParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
-
+ mNumChannels = opusParams->nChannels;
+ mSamplingRate = opusParams->nSampleRate;
return OMX_ErrorNone;
}
@@ -496,6 +499,8 @@
*(reinterpret_cast<int64_t*>(inHeader->pBuffer +
inHeader->nOffset)),
kRate);
+ mSamplingRate = kRate;
+ mNumChannels = mHeader->channels;
notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
mOutputPortSettingsChange = AWAITING_DISABLED;
}
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.h b/media/libstagefright/codecs/opus/dec/SoftOpus.h
index 91cafa1..00058c8 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.h
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.h
@@ -70,6 +70,8 @@
OpusMSDecoder *mDecoder;
OpusHeader *mHeader;
+ int32_t mNumChannels;
+ int32_t mSamplingRate;
int64_t mCodecDelay;
int64_t mSeekPreRoll;
int64_t mSamplesToDiscard;
diff --git a/media/libstagefright/foundation/TEST_MAPPING b/media/libstagefright/foundation/TEST_MAPPING
index 0d6a6da..a70c352 100644
--- a/media/libstagefright/foundation/TEST_MAPPING
+++ b/media/libstagefright/foundation/TEST_MAPPING
@@ -1,9 +1,13 @@
// mappings for frameworks/av/media/libstagefright/foundation
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- //{ "name": "OpusHeaderTest" },
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "OpusHeaderTest" }
+ ],
+ "presubmit": [
{ "name": "sf_foundation_test" },
{ "name": "MetaDataBaseUnitTest"}
]
diff --git a/media/libstagefright/foundation/tests/OpusHeader/Android.bp b/media/libstagefright/foundation/tests/OpusHeader/Android.bp
index c1251a8..ed3298c 100644
--- a/media/libstagefright/foundation/tests/OpusHeader/Android.bp
+++ b/media/libstagefright/foundation/tests/OpusHeader/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "OpusHeaderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/id3/TEST_MAPPING b/media/libstagefright/id3/TEST_MAPPING
index e4454c1..d070d25 100644
--- a/media/libstagefright/id3/TEST_MAPPING
+++ b/media/libstagefright/id3/TEST_MAPPING
@@ -1,9 +1,13 @@
// frameworks/av/media/libstagefright/id3
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- //{ "name": "ID3Test" },
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "ID3Test" }
+ ],
+ "presubmit": [
// this doesn't seem to run any tests.
// but: cts-tradefed run -m CtsMediaTestCases -t android.media.cts.MediaMetadataRetrieverTest
// does run he 32 and 64 bit tests, but not the instant tests
diff --git a/media/libstagefright/id3/test/Android.bp b/media/libstagefright/id3/test/Android.bp
index 9d26eec..acf38e2 100644
--- a/media/libstagefright/id3/test/Android.bp
+++ b/media/libstagefright/id3/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "ID3Test",
+ test_suites: ["device-tests"],
gtest: true,
srcs: ["ID3Test.cpp"],
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index cc40f76..797ba31 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -500,6 +500,7 @@
status_t setupAMRCodec(bool encoder, bool isWAMR, int32_t bitRate);
status_t setupG711Codec(bool encoder, int32_t sampleRate, int32_t numChannels);
+ status_t setupOpusCodec(bool encoder, int32_t sampleRate, int32_t numChannels);
status_t setupFlacCodec(
bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel,
AudioEncoding encoding);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index c4026ec..46cff28 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -373,6 +373,7 @@
AString mOwnerName;
sp<MediaCodecInfo> mCodecInfo;
sp<AReplyToken> mReplyID;
+ std::vector<sp<AMessage>> mDeferredMessages;
uint32_t mFlags;
status_t mStickyError;
sp<Surface> mSurface;
@@ -442,6 +443,7 @@
static status_t PostAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response);
+ void PostReplyWithError(const sp<AMessage> &msg, int32_t err);
void PostReplyWithError(const sp<AReplyToken> &replyID, int32_t err);
status_t init(const AString &name);
@@ -493,6 +495,9 @@
bool hasPendingBuffer(int portIndex);
bool hasPendingBuffer();
+ void postPendingRepliesAndDeferredMessages(status_t err = OK);
+ void postPendingRepliesAndDeferredMessages(const sp<AMessage> &response);
+
/* called to get the last codec error when the sticky flag is set.
* if no such codec error is found, returns UNKNOWN_ERROR.
*/
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 2f34094..6b0d28f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -257,6 +257,10 @@
kKeyRtpCvoDegrees = 'cvod', // int32_t, rtp cvo degrees as per 3GPP 26.114.
kKeyRtpDscp = 'dscp', // int32_t, DSCP(Differentiated services codepoint) of RFC 2474.
kKeySocketNetwork = 'sNet', // int64_t, socket will be bound to network handle.
+
+ // Slow-motion markers
+ kKeySlowMotionMarkers = 'slmo', // raw data, byte array following spec for
+ // MediaFormat#KEY_SLOW_MOTION_MARKERS
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/ProcessInfo.h b/media/libstagefright/include/media/stagefright/ProcessInfo.h
index 0be1a52..b8a3c10 100644
--- a/media/libstagefright/include/media/stagefright/ProcessInfo.h
+++ b/media/libstagefright/include/media/stagefright/ProcessInfo.h
@@ -20,6 +20,9 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/ProcessInfoInterface.h>
+#include <map>
+#include <mutex>
+#include <utils/Condition.h>
namespace android {
@@ -28,11 +31,20 @@
virtual bool getPriority(int pid, int* priority);
virtual bool isValidPid(int pid);
+ virtual bool overrideProcessInfo(int pid, int procState, int oomScore);
+ virtual void removeProcessInfoOverride(int pid);
protected:
virtual ~ProcessInfo();
private:
+ struct ProcessInfoOverride {
+ int procState;
+ int oomScore;
+ };
+ std::mutex mOverrideLock;
+ std::map<int, ProcessInfoOverride> mOverrideMap GUARDED_BY(mOverrideLock);
+
DISALLOW_EVIL_CONSTRUCTORS(ProcessInfo);
};
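The header above only declares the override hooks and the mutex-guarded map; the corresponding ProcessInfo.cpp change is not part of this hunk. Purely as an assumed sketch (not the actual implementation), a table backing overrideProcessInfo()/removeProcessInfoOverride() could look like:

#include <map>
#include <mutex>

// Assumed sketch only; names and policy are illustrative.
struct ProcInfoOverride { int procState; int oomScore; };

class OverrideTable {
public:
    bool add(int pid, int procState, int oomScore) {
        std::scoped_lock lock(mLock);
        // Refuse to silently replace an existing override for the same pid.
        return mMap.emplace(pid, ProcInfoOverride{procState, oomScore}).second;
    }

    void remove(int pid) {
        std::scoped_lock lock(mLock);
        mMap.erase(pid);
    }

private:
    std::mutex mLock;
    std::map<int, ProcInfoOverride> mMap;
};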
diff --git a/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h b/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
index b39112a..9260181 100644
--- a/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
+++ b/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
@@ -24,6 +24,8 @@
struct ProcessInfoInterface : public RefBase {
virtual bool getPriority(int pid, int* priority) = 0;
virtual bool isValidPid(int pid) = 0;
+ virtual bool overrideProcessInfo(int pid, int procState, int oomScore);
+ virtual void removeProcessInfoOverride(int pid);
protected:
virtual ~ProcessInfoInterface() {}
diff --git a/media/libstagefright/include/media/stagefright/Utils.h b/media/libstagefright/include/media/stagefright/Utils.h
index 2b9b759..1673120 100644
--- a/media/libstagefright/include/media/stagefright/Utils.h
+++ b/media/libstagefright/include/media/stagefright/Utils.h
@@ -33,7 +33,7 @@
const MetaDataBase *meta, sp<AMessage> *format);
status_t convertMetaDataToMessage(
const sp<MetaData> &meta, sp<AMessage> *format);
-void convertMessageToMetaData(
+status_t convertMessageToMetaData(
const sp<AMessage> &format, sp<MetaData> &meta);
// Returns a pointer to the next NAL start code in buffer of size |length| starting at |data|, or
diff --git a/media/libstagefright/mpeg2ts/TEST_MAPPING b/media/libstagefright/mpeg2ts/TEST_MAPPING
index b25d732..9f4bbdf 100644
--- a/media/libstagefright/mpeg2ts/TEST_MAPPING
+++ b/media/libstagefright/mpeg2ts/TEST_MAPPING
@@ -1,7 +1,9 @@
// frameworks/av/media/libstagefright/mpeg2ts
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- //{ "name": "Mpeg2tsUnitTest" }
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "Mpeg2tsUnitTest" }
]
}
diff --git a/media/libstagefright/renderfright/Android.bp b/media/libstagefright/renderfright/Android.bp
new file mode 100644
index 0000000..c17f84e
--- /dev/null
+++ b/media/libstagefright/renderfright/Android.bp
@@ -0,0 +1,111 @@
+cc_defaults {
+ name: "renderfright_defaults",
+ cflags: [
+ "-DLOG_TAG=\"renderfright\"",
+ "-Wall",
+ "-Werror",
+ "-Wthread-safety",
+ "-Wunused",
+ "-Wunreachable-code",
+ ],
+}
+
+cc_defaults {
+ name: "librenderfright_defaults",
+ defaults: ["renderfright_defaults"],
+ cflags: [
+ "-DGL_GLEXT_PROTOTYPES",
+ "-DEGL_EGLEXT_PROTOTYPES",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libEGL",
+ "libGLESv1_CM",
+ "libGLESv2",
+ "libgui",
+ "liblog",
+ "libnativewindow",
+ "libprocessgroup",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
+
+filegroup {
+ name: "librenderfright_sources",
+ srcs: [
+ "Description.cpp",
+ "Mesh.cpp",
+ "RenderEngine.cpp",
+ "Texture.cpp",
+ ],
+}
+
+filegroup {
+ name: "librenderfright_gl_sources",
+ srcs: [
+ "gl/GLESRenderEngine.cpp",
+ "gl/GLExtensions.cpp",
+ "gl/GLFramebuffer.cpp",
+ "gl/GLImage.cpp",
+ "gl/GLShadowTexture.cpp",
+ "gl/GLShadowVertexGenerator.cpp",
+ "gl/GLSkiaShadowPort.cpp",
+ "gl/GLVertexBuffer.cpp",
+ "gl/ImageManager.cpp",
+ "gl/Program.cpp",
+ "gl/ProgramCache.cpp",
+ "gl/filters/BlurFilter.cpp",
+ "gl/filters/GenericProgram.cpp",
+ ],
+}
+
+filegroup {
+ name: "librenderfright_threaded_sources",
+ srcs: [
+ "threaded/RenderEngineThreaded.cpp",
+ ],
+}
+
+cc_library_static {
+ name: "librenderfright",
+ defaults: ["librenderfright_defaults"],
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+ double_loadable: true,
+ clang: true,
+ cflags: [
+ "-fvisibility=hidden",
+ "-Werror=format",
+ ],
+ srcs: [
+ ":librenderfright_sources",
+ ":librenderfright_gl_sources",
+ ":librenderfright_threaded_sources",
+ ],
+ lto: {
+ thin: true,
+ },
+}
+
+cc_library_static {
+ name: "librenderfright_mocks",
+ defaults: ["librenderfright_defaults"],
+ srcs: [
+ "mock/Framebuffer.cpp",
+ "mock/Image.cpp",
+ "mock/RenderEngine.cpp",
+ ],
+ static_libs: [
+ "libgtest",
+ "libgmock",
+ ],
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
diff --git a/media/libstagefright/renderfright/Description.cpp b/media/libstagefright/renderfright/Description.cpp
new file mode 100644
index 0000000..b9cea10
--- /dev/null
+++ b/media/libstagefright/renderfright/Description.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/private/Description.h>
+
+#include <stdint.h>
+
+#include <utils/TypeHelpers.h>
+
+namespace android {
+namespace renderengine {
+
+Description::TransferFunction Description::dataSpaceToTransferFunction(ui::Dataspace dataSpace) {
+ ui::Dataspace transfer = static_cast<ui::Dataspace>(dataSpace & ui::Dataspace::TRANSFER_MASK);
+ switch (transfer) {
+ case ui::Dataspace::TRANSFER_ST2084:
+ return Description::TransferFunction::ST2084;
+ case ui::Dataspace::TRANSFER_HLG:
+ return Description::TransferFunction::HLG;
+ case ui::Dataspace::TRANSFER_LINEAR:
+ return Description::TransferFunction::LINEAR;
+ default:
+ return Description::TransferFunction::SRGB;
+ }
+}
+
+bool Description::hasInputTransformMatrix() const {
+ const mat4 identity;
+ return inputTransformMatrix != identity;
+}
+
+bool Description::hasOutputTransformMatrix() const {
+ const mat4 identity;
+ return outputTransformMatrix != identity;
+}
+
+bool Description::hasColorMatrix() const {
+ const mat4 identity;
+ return colorMatrix != identity;
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/Mesh.cpp b/media/libstagefright/renderfright/Mesh.cpp
new file mode 100644
index 0000000..ed2f45f
--- /dev/null
+++ b/media/libstagefright/renderfright/Mesh.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Mesh.h>
+
+#include <utils/Log.h>
+
+namespace android {
+namespace renderengine {
+
+Mesh::Mesh(Primitive primitive, size_t vertexCount, size_t vertexSize, size_t texCoordSize,
+ size_t cropCoordsSize, size_t shadowColorSize, size_t shadowParamsSize,
+ size_t indexCount)
+ : mVertexCount(vertexCount),
+ mVertexSize(vertexSize),
+ mTexCoordsSize(texCoordSize),
+ mCropCoordsSize(cropCoordsSize),
+ mShadowColorSize(shadowColorSize),
+ mShadowParamsSize(shadowParamsSize),
+ mPrimitive(primitive),
+ mIndexCount(indexCount) {
+ if (vertexCount == 0) {
+ mVertices.resize(1);
+ mVertices[0] = 0.0f;
+ mStride = 0;
+ return;
+ }
+ size_t stride = vertexSize + texCoordSize + cropCoordsSize + shadowColorSize + shadowParamsSize;
+ size_t remainder = (stride * vertexCount) / vertexCount;
+ // Since all of the input parameters are unsigned, if stride is less than
+ // either vertexSize or texCoordSize, it must have overflowed. remainder
+ // will be equal to stride as long as stride * vertexCount doesn't overflow.
+ if ((stride < vertexSize) || (remainder != stride)) {
+ ALOGE("Overflow in Mesh(..., %zu, %zu, %zu, %zu, %zu, %zu)", vertexCount, vertexSize,
+ texCoordSize, cropCoordsSize, shadowColorSize, shadowParamsSize);
+ mVertices.resize(1);
+ mVertices[0] = 0.0f;
+ mVertexCount = 0;
+ mVertexSize = 0;
+ mTexCoordsSize = 0;
+ mCropCoordsSize = 0;
+ mShadowColorSize = 0;
+ mShadowParamsSize = 0;
+ mStride = 0;
+ return;
+ }
+
+ mVertices.resize(stride * vertexCount);
+ mStride = stride;
+ mIndices.resize(indexCount);
+}
+
+Mesh::Primitive Mesh::getPrimitive() const {
+ return mPrimitive;
+}
+
+float const* Mesh::getPositions() const {
+ return mVertices.data();
+}
+float* Mesh::getPositions() {
+ return mVertices.data();
+}
+
+float const* Mesh::getTexCoords() const {
+ return mVertices.data() + mVertexSize;
+}
+float* Mesh::getTexCoords() {
+ return mVertices.data() + mVertexSize;
+}
+
+float const* Mesh::getCropCoords() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize;
+}
+float* Mesh::getCropCoords() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize;
+}
+
+float const* Mesh::getShadowColor() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize;
+}
+float* Mesh::getShadowColor() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize;
+}
+
+float const* Mesh::getShadowParams() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize + mShadowColorSize;
+}
+float* Mesh::getShadowParams() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize + mShadowColorSize;
+}
+
+uint16_t const* Mesh::getIndices() const {
+ return mIndices.data();
+}
+
+uint16_t* Mesh::getIndices() {
+ return mIndices.data();
+}
+
+size_t Mesh::getVertexCount() const {
+ return mVertexCount;
+}
+
+size_t Mesh::getVertexSize() const {
+ return mVertexSize;
+}
+
+size_t Mesh::getTexCoordsSize() const {
+ return mTexCoordsSize;
+}
+
+size_t Mesh::getShadowColorSize() const {
+ return mShadowColorSize;
+}
+
+size_t Mesh::getShadowParamsSize() const {
+ return mShadowParamsSize;
+}
+
+size_t Mesh::getByteStride() const {
+ return mStride * sizeof(float);
+}
+
+size_t Mesh::getStride() const {
+ return mStride;
+}
+
+size_t Mesh::getIndexCount() const {
+ return mIndexCount;
+}
+
+} // namespace renderengine
+} // namespace android
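The Mesh constructor above guards the vertex-buffer allocation against size overflow by dividing stride * vertexCount back by vertexCount and comparing with stride. The same divide-back check in isolation:

#include <cstddef>

// Returns true when stride * vertexCount would overflow size_t, using the
// same divide-back test as the Mesh constructor above.
static bool wouldOverflow(size_t stride, size_t vertexCount) {
    if (vertexCount == 0) return false;
    size_t total = stride * vertexCount;     // wraps on overflow
    return (total / vertexCount) != stride;  // wrap detected when it does
}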
diff --git a/media/libstagefright/renderfright/RenderEngine.cpp b/media/libstagefright/renderfright/RenderEngine.cpp
new file mode 100644
index 0000000..c3fbb60
--- /dev/null
+++ b/media/libstagefright/renderfright/RenderEngine.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/RenderEngine.h>
+
+#include <cutils/properties.h>
+#include <log/log.h>
+#include <private/gui/SyncFeatures.h>
+#include "gl/GLESRenderEngine.h"
+#include "threaded/RenderEngineThreaded.h"
+
+namespace android {
+namespace renderengine {
+
+std::unique_ptr<RenderEngine> RenderEngine::create(const RenderEngineCreationArgs& args) {
+ RenderEngineType renderEngineType = args.renderEngineType;
+
+ // Keep the ability to override by PROPERTIES:
+ char prop[PROPERTY_VALUE_MAX];
+ property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, prop, "");
+ if (strcmp(prop, "gles") == 0) {
+ renderEngineType = RenderEngineType::GLES;
+ }
+ if (strcmp(prop, "threaded") == 0) {
+ renderEngineType = RenderEngineType::THREADED;
+ }
+
+ switch (renderEngineType) {
+ case RenderEngineType::THREADED:
+ ALOGD("Threaded RenderEngine with GLES Backend");
+ return renderengine::threaded::RenderEngineThreaded::create(
+ [args]() { return android::renderengine::gl::GLESRenderEngine::create(args); });
+ case RenderEngineType::GLES:
+ default:
+ ALOGD("RenderEngine with GLES Backend");
+ return renderengine::gl::GLESRenderEngine::create(args);
+ }
+}
+
+RenderEngine::~RenderEngine() = default;
+
+namespace impl {
+
+RenderEngine::RenderEngine(const RenderEngineCreationArgs& args) : mArgs(args) {}
+
+RenderEngine::~RenderEngine() = default;
+
+bool RenderEngine::useNativeFenceSync() const {
+ return SyncFeatures::getInstance().useNativeFenceSync();
+}
+
+bool RenderEngine::useWaitSync() const {
+ return SyncFeatures::getInstance().useWaitSync();
+}
+
+} // namespace impl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/Texture.cpp b/media/libstagefright/renderfright/Texture.cpp
new file mode 100644
index 0000000..154cde8
--- /dev/null
+++ b/media/libstagefright/renderfright/Texture.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Texture.h>
+
+namespace android {
+namespace renderengine {
+
+Texture::Texture()
+ : mTextureName(0), mTextureTarget(TEXTURE_2D), mWidth(0), mHeight(0), mFiltering(false) {}
+
+Texture::Texture(Target textureTarget, uint32_t textureName)
+ : mTextureName(textureName),
+ mTextureTarget(textureTarget),
+ mWidth(0),
+ mHeight(0),
+ mFiltering(false) {}
+
+void Texture::init(Target textureTarget, uint32_t textureName) {
+ mTextureName = textureName;
+ mTextureTarget = textureTarget;
+}
+
+Texture::~Texture() {}
+
+void Texture::setMatrix(float const* matrix) {
+ mTextureMatrix = mat4(matrix);
+}
+
+void Texture::setFiltering(bool enabled) {
+ mFiltering = enabled;
+}
+
+void Texture::setDimensions(size_t width, size_t height) {
+ mWidth = width;
+ mHeight = height;
+}
+
+uint32_t Texture::getTextureName() const {
+ return mTextureName;
+}
+
+uint32_t Texture::getTextureTarget() const {
+ return mTextureTarget;
+}
+
+const mat4& Texture::getMatrix() const {
+ return mTextureMatrix;
+}
+
+bool Texture::getFiltering() const {
+ return mFiltering;
+}
+
+size_t Texture::getWidth() const {
+ return mWidth;
+}
+
+size_t Texture::getHeight() const {
+ return mHeight;
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp b/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp
new file mode 100644
index 0000000..824bdd9
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp
@@ -0,0 +1,1772 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#undef LOG_TAG
+#define LOG_TAG "RenderEngine"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <sched.h>
+#include <cmath>
+#include <fstream>
+#include <sstream>
+#include <unordered_set>
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <android-base/stringprintf.h>
+#include <cutils/compiler.h>
+#include <cutils/properties.h>
+#include <gui/DebugEGLImageTracker.h>
+#include <renderengine/Mesh.h>
+#include <renderengine/Texture.h>
+#include <renderengine/private/Description.h>
+#include <sync/sync.h>
+#include <ui/ColorSpace.h>
+#include <ui/DebugUtils.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <utils/KeyedVector.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "GLExtensions.h"
+#include "GLFramebuffer.h"
+#include "GLImage.h"
+#include "GLShadowVertexGenerator.h"
+#include "Program.h"
+#include "ProgramCache.h"
+#include "filters/BlurFilter.h"
+
+bool checkGlError(const char* op, int lineNumber) {
+ bool errorFound = false;
+ GLint error = glGetError();
+ while (error != GL_NO_ERROR) {
+ errorFound = true;
+ // log the error we just observed before polling for the next one
+ ALOGV("after %s() (line # %d) glError (0x%x)\n", op, lineNumber, error);
+ error = glGetError();
+ }
+ return errorFound;
+}
+
+static constexpr bool outputDebugPPMs = false;
+
+void writePPM(const char* basename, GLuint width, GLuint height) {
+ ALOGV("writePPM #%s: %d x %d", basename, width, height);
+
+ std::vector<GLubyte> pixels(width * height * 4);
+ std::vector<GLubyte> outBuffer(width * height * 3);
+
+ // TODO(courtneygo): We can now have float formats, need
+ // to remove this code or update to support.
+ // Make returned pixels fit in uint32_t, one byte per component
+ glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
+ if (checkGlError(__FUNCTION__, __LINE__)) {
+ return;
+ }
+
+ std::string filename(basename);
+ filename.append(".ppm");
+ std::ofstream file(filename.c_str(), std::ios::binary);
+ if (!file.is_open()) {
+ ALOGE("Unable to open file: %s", filename.c_str());
+ ALOGE("You may need to do: \"adb shell setenforce 0\" to enable "
+ "surfaceflinger to write debug images");
+ return;
+ }
+
+ file << "P6\n";
+ file << width << "\n";
+ file << height << "\n";
+ file << 255 << "\n";
+
+ auto ptr = reinterpret_cast<char*>(pixels.data());
+ auto outPtr = reinterpret_cast<char*>(outBuffer.data());
+ for (int y = height - 1; y >= 0; y--) {
+ char* data = ptr + y * width * sizeof(uint32_t);
+
+ for (GLuint x = 0; x < width; x++) {
+ // Only copy R, G and B components
+ outPtr[0] = data[0];
+ outPtr[1] = data[1];
+ outPtr[2] = data[2];
+ data += sizeof(uint32_t);
+ outPtr += 3;
+ }
+ }
+ file.write(reinterpret_cast<char*>(outBuffer.data()), outBuffer.size());
+}
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+using base::StringAppendF;
+using ui::Dataspace;
+
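+
+// Enumerates the configs matching |attrs| and returns the first one whose
+// |attribute| equals |wanted| (or simply the first match when |attribute| is EGL_NONE).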
+static status_t selectConfigForAttribute(EGLDisplay dpy, EGLint const* attrs, EGLint attribute,
+ EGLint wanted, EGLConfig* outConfig) {
+ EGLint numConfigs = -1, n = 0;
+ eglGetConfigs(dpy, nullptr, 0, &numConfigs);
+ std::vector<EGLConfig> configs(numConfigs, EGL_NO_CONFIG_KHR);
+ eglChooseConfig(dpy, attrs, configs.data(), configs.size(), &n);
+ configs.resize(n);
+
+ if (!configs.empty()) {
+ if (attribute != EGL_NONE) {
+ for (EGLConfig config : configs) {
+ EGLint value = 0;
+ eglGetConfigAttrib(dpy, config, attribute, &value);
+ if (wanted == value) {
+ *outConfig = config;
+ return NO_ERROR;
+ }
+ }
+ } else {
+ // just pick the first one
+ *outConfig = configs[0];
+ return NO_ERROR;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+static status_t selectEGLConfig(EGLDisplay display, EGLint format, EGLint renderableType,
+ EGLConfig* config) {
+ // select our EGLConfig. It must support EGL_RECORDABLE_ANDROID if
+ // it is to be used with Wi-Fi displays
+ status_t err;
+ EGLint wantedAttribute;
+ EGLint wantedAttributeValue;
+
+ std::vector<EGLint> attribs;
+ if (renderableType) {
+ const ui::PixelFormat pixelFormat = static_cast<ui::PixelFormat>(format);
+ const bool is1010102 = pixelFormat == ui::PixelFormat::RGBA_1010102;
+
+ // Default to 8 bits per channel.
+ const EGLint tmpAttribs[] = {
+ EGL_RENDERABLE_TYPE,
+ renderableType,
+ EGL_RECORDABLE_ANDROID,
+ EGL_TRUE,
+ EGL_SURFACE_TYPE,
+ EGL_WINDOW_BIT | EGL_PBUFFER_BIT,
+ EGL_FRAMEBUFFER_TARGET_ANDROID,
+ EGL_TRUE,
+ EGL_RED_SIZE,
+ is1010102 ? 10 : 8,
+ EGL_GREEN_SIZE,
+ is1010102 ? 10 : 8,
+ EGL_BLUE_SIZE,
+ is1010102 ? 10 : 8,
+ EGL_ALPHA_SIZE,
+ is1010102 ? 2 : 8,
+ EGL_NONE,
+ };
+ std::copy(tmpAttribs, tmpAttribs + (sizeof(tmpAttribs) / sizeof(EGLint)),
+ std::back_inserter(attribs));
+ wantedAttribute = EGL_NONE;
+ wantedAttributeValue = EGL_NONE;
+ } else {
+ // if no renderable type is specified, fall back to a simplified query
+ wantedAttribute = EGL_NATIVE_VISUAL_ID;
+ wantedAttributeValue = format;
+ }
+
+ err = selectConfigForAttribute(display, attribs.data(), wantedAttribute, wantedAttributeValue,
+ config);
+ if (err == NO_ERROR) {
+ EGLint caveat;
+ if (eglGetConfigAttrib(display, *config, EGL_CONFIG_CAVEAT, &caveat))
+ ALOGW_IF(caveat == EGL_SLOW_CONFIG, "EGL_SLOW_CONFIG selected!");
+ }
+
+ return err;
+}
+
+std::unique_ptr<GLESRenderEngine> GLESRenderEngine::create(const RenderEngineCreationArgs& args) {
+ // initialize EGL for the default display
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (!eglInitialize(display, nullptr, nullptr)) {
+ LOG_ALWAYS_FATAL("failed to initialize EGL");
+ }
+
+ const auto eglVersion = eglQueryString(display, EGL_VERSION);
+ if (!eglVersion) {
+ checkGlError(__FUNCTION__, __LINE__);
+ LOG_ALWAYS_FATAL("eglQueryString(EGL_VERSION) failed");
+ }
+
+ const auto eglExtensions = eglQueryString(display, EGL_EXTENSIONS);
+ if (!eglExtensions) {
+ checkGlError(__FUNCTION__, __LINE__);
+ LOG_ALWAYS_FATAL("eglQueryString(EGL_EXTENSIONS) failed");
+ }
+
+ GLExtensions& extensions = GLExtensions::getInstance();
+ extensions.initWithEGLStrings(eglVersion, eglExtensions);
+
+ // The code assumes that ES2 or later is available if this extension is
+ // supported.
+ EGLConfig config = EGL_NO_CONFIG;
+ if (!extensions.hasNoConfigContext()) {
+ config = chooseEglConfig(display, args.pixelFormat, /*logConfig*/ true);
+ }
+
+ bool useContextPriority =
+ extensions.hasContextPriority() && args.contextPriority == ContextPriority::HIGH;
+ EGLContext protectedContext = EGL_NO_CONTEXT;
+ if (args.enableProtectedContext && extensions.hasProtectedContent()) {
+ protectedContext = createEglContext(display, config, nullptr, useContextPriority,
+ Protection::PROTECTED);
+ ALOGE_IF(protectedContext == EGL_NO_CONTEXT, "Can't create protected context");
+ }
+
+ EGLContext ctxt = createEglContext(display, config, protectedContext, useContextPriority,
+ Protection::UNPROTECTED);
+
+ // if we can't create a GL context, we can only abort.
+ LOG_ALWAYS_FATAL_IF(ctxt == EGL_NO_CONTEXT, "EGLContext creation failed");
+
+ EGLSurface stub = EGL_NO_SURFACE;
+ if (!extensions.hasSurfacelessContext()) {
+ stub = createStubEglPbufferSurface(display, config, args.pixelFormat,
+ Protection::UNPROTECTED);
+ LOG_ALWAYS_FATAL_IF(stub == EGL_NO_SURFACE, "can't create stub pbuffer");
+ }
+ EGLBoolean success = eglMakeCurrent(display, stub, stub, ctxt);
+ LOG_ALWAYS_FATAL_IF(!success, "can't make stub pbuffer current");
+ extensions.initWithGLStrings(glGetString(GL_VENDOR), glGetString(GL_RENDERER),
+ glGetString(GL_VERSION), glGetString(GL_EXTENSIONS));
+
+ EGLSurface protectedStub = EGL_NO_SURFACE;
+ if (protectedContext != EGL_NO_CONTEXT && !extensions.hasSurfacelessContext()) {
+ protectedStub = createStubEglPbufferSurface(display, config, args.pixelFormat,
+ Protection::PROTECTED);
+ ALOGE_IF(protectedStub == EGL_NO_SURFACE, "can't create protected stub pbuffer");
+ }
+
+ // now figure out what version of GL we actually got
+ GlesVersion version = parseGlesVersion(extensions.getVersion());
+
+ LOG_ALWAYS_FATAL_IF(args.supportsBackgroundBlur && version < GLES_VERSION_3_0,
+ "Blurs require OpenGL ES 3.0. Please unset ro.surface_flinger.supports_background_blur");
+
+ // initialize the renderer while GL is current
+ std::unique_ptr<GLESRenderEngine> engine;
+ switch (version) {
+ case GLES_VERSION_1_0:
+ case GLES_VERSION_1_1:
+ LOG_ALWAYS_FATAL("SurfaceFlinger requires OpenGL ES 2.0 minimum to run.");
+ break;
+ case GLES_VERSION_2_0:
+ case GLES_VERSION_3_0:
+ engine = std::make_unique<GLESRenderEngine>(args, display, config, ctxt, stub,
+ protectedContext, protectedStub);
+ break;
+ }
+
+ ALOGI("OpenGL ES informations:");
+ ALOGI("vendor : %s", extensions.getVendor());
+ ALOGI("renderer : %s", extensions.getRenderer());
+ ALOGI("version : %s", extensions.getVersion());
+ ALOGI("extensions: %s", extensions.getExtensions());
+ ALOGI("GL_MAX_TEXTURE_SIZE = %zu", engine->getMaxTextureSize());
+ ALOGI("GL_MAX_VIEWPORT_DIMS = %zu", engine->getMaxViewportDims());
+
+ return engine;
+}
+
+EGLConfig GLESRenderEngine::chooseEglConfig(EGLDisplay display, int format, bool logConfig) {
+ status_t err;
+ EGLConfig config;
+
+ // First try to get an ES3 config
+ err = selectEGLConfig(display, format, EGL_OPENGL_ES3_BIT, &config);
+ if (err != NO_ERROR) {
+ // If ES3 fails, try to get an ES2 config
+ err = selectEGLConfig(display, format, EGL_OPENGL_ES2_BIT, &config);
+ if (err != NO_ERROR) {
+ // If ES2 still doesn't work, we're probably running on the emulator;
+ // try a simplified query.
+ ALOGW("no suitable EGLConfig found, trying a simpler query");
+ err = selectEGLConfig(display, format, 0, &config);
+ if (err != NO_ERROR) {
+ // this EGL is too lame for android
+ LOG_ALWAYS_FATAL("no suitable EGLConfig found, giving up");
+ }
+ }
+ }
+
+ if (logConfig) {
+ // print some debugging info
+ EGLint r, g, b, a;
+ eglGetConfigAttrib(display, config, EGL_RED_SIZE, &r);
+ eglGetConfigAttrib(display, config, EGL_GREEN_SIZE, &g);
+ eglGetConfigAttrib(display, config, EGL_BLUE_SIZE, &b);
+ eglGetConfigAttrib(display, config, EGL_ALPHA_SIZE, &a);
+ ALOGI("EGL information:");
+ ALOGI("vendor : %s", eglQueryString(display, EGL_VENDOR));
+ ALOGI("version : %s", eglQueryString(display, EGL_VERSION));
+ ALOGI("extensions: %s", eglQueryString(display, EGL_EXTENSIONS));
+ ALOGI("Client API: %s", eglQueryString(display, EGL_CLIENT_APIS) ?: "Not Supported");
+ ALOGI("EGLSurface: %d-%d-%d-%d, config=%p", r, g, b, a, config);
+ }
+
+ return config;
+}
+
+GLESRenderEngine::GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
+ EGLConfig config, EGLContext ctxt, EGLSurface stub,
+ EGLContext protectedContext, EGLSurface protectedStub)
+ : renderengine::impl::RenderEngine(args),
+ mEGLDisplay(display),
+ mEGLConfig(config),
+ mEGLContext(ctxt),
+ mStubSurface(stub),
+ mProtectedEGLContext(protectedContext),
+ mProtectedStubSurface(protectedStub),
+ mVpWidth(0),
+ mVpHeight(0),
+ mFramebufferImageCacheSize(args.imageCacheSize),
+ mUseColorManagement(args.useColorManagement) {
+ glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTextureSize);
+ glGetIntegerv(GL_MAX_VIEWPORT_DIMS, mMaxViewportDims);
+
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+ glPixelStorei(GL_PACK_ALIGNMENT, 4);
+
+ // Initialize protected EGL Context.
+ if (mProtectedEGLContext != EGL_NO_CONTEXT) {
+ EGLBoolean success = eglMakeCurrent(display, mProtectedStubSurface, mProtectedStubSurface,
+ mProtectedEGLContext);
+ ALOGE_IF(!success, "can't make protected context current");
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+ glPixelStorei(GL_PACK_ALIGNMENT, 4);
+ success = eglMakeCurrent(display, mStubSurface, mStubSurface, mEGLContext);
+ LOG_ALWAYS_FATAL_IF(!success, "can't make default context current");
+ }
+
+ // mColorBlindnessCorrection = M;
+
+ if (mUseColorManagement) {
+ const ColorSpace srgb(ColorSpace::sRGB());
+ const ColorSpace displayP3(ColorSpace::DisplayP3());
+ const ColorSpace bt2020(ColorSpace::BT2020());
+
+ // no chromatic adaptation needed since all color spaces use D65 for their white points.
+ mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
+ mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
+ mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
+ mXyzToSrgb = mat4(srgb.getXYZtoRGB());
+ mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
+ mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
+
+ // Compute sRGB to Display P3 and BT2020 transform matrix.
+ // NOTE: For now, we are limiting output wide color space support to
+ // Display-P3 and BT2020 only.
+ mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
+ mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;
+
+ // Compute Display P3 to sRGB and BT2020 transform matrix.
+ mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
+ mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;
+
+ // Compute BT2020 to sRGB and Display P3 transform matrix
+ mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
+ mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
+ }
+
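+ // When the debug.egl.traceGpuCompletion property is set, create a FlushTracer
+ // so an extra fence sync is queued after each flush()/finish() for GPU tracing.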
+ char value[PROPERTY_VALUE_MAX];
+ property_get("debug.egl.traceGpuCompletion", value, "0");
+ if (atoi(value)) {
+ mTraceGpuCompletion = true;
+ mFlushTracer = std::make_unique<FlushTracer>(this);
+ }
+
+ if (args.supportsBackgroundBlur) {
+ mBlurFilter = new BlurFilter(*this);
+ checkErrors("BlurFilter creation");
+ }
+
+ mImageManager = std::make_unique<ImageManager>(this);
+ mImageManager->initThread();
+ mDrawingBuffer = createFramebuffer();
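+ // Allocate a 1x1 placeholder buffer and wrap it in an EGLImage; cleanupPostRender()
+ // rebinds external textures to this image so the real buffers' memory can be freed.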
+ sp<GraphicBuffer> buf =
+ new GraphicBuffer(1, 1, PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, "placeholder");
+
+ const status_t err = buf->initCheck();
+ if (err != OK) {
+ ALOGE("Error allocating placeholder buffer: %d", err);
+ return;
+ }
+ mPlaceholderBuffer = buf.get();
+ EGLint attributes[] = {
+ EGL_NONE,
+ };
+ mPlaceholderImage = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ mPlaceholderBuffer, attributes);
+ ALOGE_IF(mPlaceholderImage == EGL_NO_IMAGE_KHR, "Failed to create placeholder image: %#x",
+ eglGetError());
+}
+
+GLESRenderEngine::~GLESRenderEngine() {
+ // Destroy the image manager first.
+ mImageManager = nullptr;
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ unbindFrameBuffer(mDrawingBuffer.get());
+ mDrawingBuffer = nullptr;
+ while (!mFramebufferImageCache.empty()) {
+ EGLImageKHR expired = mFramebufferImageCache.front().second;
+ mFramebufferImageCache.pop_front();
+ eglDestroyImageKHR(mEGLDisplay, expired);
+ DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+ }
+ eglDestroyImageKHR(mEGLDisplay, mPlaceholderImage);
+ mImageCache.clear();
+ eglMakeCurrent(mEGLDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ eglTerminate(mEGLDisplay);
+}
+
+std::unique_ptr<Framebuffer> GLESRenderEngine::createFramebuffer() {
+ return std::make_unique<GLFramebuffer>(*this);
+}
+
+std::unique_ptr<Image> GLESRenderEngine::createImage() {
+ return std::make_unique<GLImage>(*this);
+}
+
+Framebuffer* GLESRenderEngine::getFramebufferForDrawing() {
+ return mDrawingBuffer.get();
+}
+
+void GLESRenderEngine::primeCache() const {
+ ProgramCache::getInstance().primeCache(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
+ mArgs.useColorManagement,
+ mArgs.precacheToneMapperShaderOnly);
+}
+
+base::unique_fd GLESRenderEngine::flush() {
+ ATRACE_CALL();
+ if (!GLExtensions::getInstance().hasNativeFenceSync()) {
+ return base::unique_fd();
+ }
+
+ EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
+ if (sync == EGL_NO_SYNC_KHR) {
+ ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
+ return base::unique_fd();
+ }
+
+ // native fence fd will not be populated until flush() is done.
+ glFlush();
+
+ // get the fence fd
+ base::unique_fd fenceFd(eglDupNativeFenceFDANDROID(mEGLDisplay, sync));
+ eglDestroySyncKHR(mEGLDisplay, sync);
+ if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
+ ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
+ }
+
+ // Only trace if we have a valid fence, as current usage falls back to
+ // calling finish() if the fence fd is invalid.
+ if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer) && fenceFd.get() >= 0) {
+ mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
+ }
+
+ return fenceFd;
+}
+
+bool GLESRenderEngine::finish() {
+ ATRACE_CALL();
+ if (!GLExtensions::getInstance().hasFenceSync()) {
+ ALOGW("no synchronization support");
+ return false;
+ }
+
+ EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr);
+ if (sync == EGL_NO_SYNC_KHR) {
+ ALOGW("failed to create EGL fence sync: %#x", eglGetError());
+ return false;
+ }
+
+ if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer)) {
+ mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
+ }
+
+ return waitSync(sync, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR);
+}
+
+bool GLESRenderEngine::waitSync(EGLSyncKHR sync, EGLint flags) {
+ EGLint result = eglClientWaitSyncKHR(mEGLDisplay, sync, flags, 2000000000 /*2 sec*/);
+ EGLint error = eglGetError();
+ eglDestroySyncKHR(mEGLDisplay, sync);
+ if (result != EGL_CONDITION_SATISFIED_KHR) {
+ if (result == EGL_TIMEOUT_EXPIRED_KHR) {
+ ALOGW("fence wait timed out");
+ } else {
+ ALOGW("error waiting on EGL fence: %#x", error);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+bool GLESRenderEngine::waitFence(base::unique_fd fenceFd) {
+ if (!GLExtensions::getInstance().hasNativeFenceSync() ||
+ !GLExtensions::getInstance().hasWaitSync()) {
+ return false;
+ }
+
+ // release the fd and transfer the ownership to EGLSync
+ EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, fenceFd.release(), EGL_NONE};
+ EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
+ if (sync == EGL_NO_SYNC_KHR) {
+ ALOGE("failed to create EGL native fence sync: %#x", eglGetError());
+ return false;
+ }
+
+ // XXX: The spec draft is inconsistent as to whether this should return an
+ // EGLint or void. Ignore the return value for now, as it's not strictly
+ // needed.
+ eglWaitSyncKHR(mEGLDisplay, sync, 0);
+ EGLint error = eglGetError();
+ eglDestroySyncKHR(mEGLDisplay, sync);
+ if (error != EGL_SUCCESS) {
+ ALOGE("failed to wait for EGL native fence sync: %#x", error);
+ return false;
+ }
+
+ return true;
+}
+
+void GLESRenderEngine::clearWithColor(float red, float green, float blue, float alpha) {
+ ATRACE_CALL();
+ glDisable(GL_BLEND);
+ glClearColor(red, green, blue, alpha);
+ glClear(GL_COLOR_BUFFER_BIT);
+}
+
+void GLESRenderEngine::fillRegionWithColor(const Region& region, float red, float green, float blue,
+ float alpha) {
+ size_t c;
+ Rect const* r = region.getArray(&c);
+ Mesh mesh = Mesh::Builder()
+ .setPrimitive(Mesh::TRIANGLES)
+ .setVertices(c * 6 /* count */, 2 /* size */)
+ .build();
+ Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
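+ // Each Rect in the region is expanded into two triangles (six vertices).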
+ for (size_t i = 0; i < c; i++, r++) {
+ position[i * 6 + 0].x = r->left;
+ position[i * 6 + 0].y = r->top;
+ position[i * 6 + 1].x = r->left;
+ position[i * 6 + 1].y = r->bottom;
+ position[i * 6 + 2].x = r->right;
+ position[i * 6 + 2].y = r->bottom;
+ position[i * 6 + 3].x = r->left;
+ position[i * 6 + 3].y = r->top;
+ position[i * 6 + 4].x = r->right;
+ position[i * 6 + 4].y = r->bottom;
+ position[i * 6 + 5].x = r->right;
+ position[i * 6 + 5].y = r->top;
+ }
+ setupFillWithColor(red, green, blue, alpha);
+ drawMesh(mesh);
+}
+
+void GLESRenderEngine::setScissor(const Rect& region) {
+ glScissor(region.left, region.top, region.getWidth(), region.getHeight());
+ glEnable(GL_SCISSOR_TEST);
+}
+
+void GLESRenderEngine::disableScissor() {
+ glDisable(GL_SCISSOR_TEST);
+}
+
+void GLESRenderEngine::genTextures(size_t count, uint32_t* names) {
+ glGenTextures(count, names);
+}
+
+void GLESRenderEngine::deleteTextures(size_t count, uint32_t const* names) {
+ for (size_t i = 0; i < count; ++i) {
+ mTextureView.erase(names[i]);
+ }
+ glDeleteTextures(count, names);
+}
+
+void GLESRenderEngine::bindExternalTextureImage(uint32_t texName, const Image& image) {
+ ATRACE_CALL();
+ const GLImage& glImage = static_cast<const GLImage&>(image);
+ const GLenum target = GL_TEXTURE_EXTERNAL_OES;
+
+ glBindTexture(target, texName);
+ if (glImage.getEGLImage() != EGL_NO_IMAGE_KHR) {
+ glEGLImageTargetTexture2DOES(target, static_cast<GLeglImageOES>(glImage.getEGLImage()));
+ }
+}
+
+status_t GLESRenderEngine::bindExternalTextureBuffer(uint32_t texName,
+ const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& bufferFence) {
+ if (buffer == nullptr) {
+ return BAD_VALUE;
+ }
+
+ ATRACE_CALL();
+
+ bool found = false;
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ auto cachedImage = mImageCache.find(buffer->getId());
+ found = (cachedImage != mImageCache.end());
+ }
+
+ // If we couldn't find the image in the cache at this time, then either
+ // SurfaceFlinger messed up registering the buffer ahead of time or we got
+ // backed up creating other EGLImages.
+ if (!found) {
+ status_t cacheResult = mImageManager->cache(buffer);
+ if (cacheResult != NO_ERROR) {
+ return cacheResult;
+ }
+ }
+
+ // Whether or not we needed to cache, re-check mImageCache to make sure that
+ // there's an EGLImage. The current threading model guarantees that we don't
+ // destroy a cached image until it's really not needed anymore (i.e. this
+ // function should not be called), so the only possibility is that something
+ // terrible went wrong and we should just bind something and move on.
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ auto cachedImage = mImageCache.find(buffer->getId());
+
+ if (cachedImage == mImageCache.end()) {
+ // We failed creating the image if we got here, so bail out.
+ ALOGE("Failed to create an EGLImage when rendering");
+ bindExternalTextureImage(texName, *createImage());
+ return NO_INIT;
+ }
+
+ bindExternalTextureImage(texName, *cachedImage->second);
+ mTextureView.insert_or_assign(texName, buffer->getId());
+ }
+
+ // Wait for the new buffer to be ready.
+ if (bufferFence != nullptr && bufferFence->isValid()) {
+ if (GLExtensions::getInstance().hasWaitSync()) {
+ base::unique_fd fenceFd(bufferFence->dup());
+ if (fenceFd == -1) {
+ ALOGE("error dup'ing fence fd: %d", errno);
+ return -errno;
+ }
+ if (!waitFence(std::move(fenceFd))) {
+ ALOGE("failed to wait on fence fd");
+ return UNKNOWN_ERROR;
+ }
+ } else {
+ status_t err = bufferFence->waitForever("RenderEngine::bindExternalTextureBuffer");
+ if (err != NO_ERROR) {
+ ALOGE("error waiting for fence: %d", err);
+ return err;
+ }
+ }
+ }
+
+ return NO_ERROR;
+}
+
+void GLESRenderEngine::cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+ mImageManager->cacheAsync(buffer, nullptr);
+}
+
+std::shared_ptr<ImageManager::Barrier> GLESRenderEngine::cacheExternalTextureBufferForTesting(
+ const sp<GraphicBuffer>& buffer) {
+ auto barrier = std::make_shared<ImageManager::Barrier>();
+ mImageManager->cacheAsync(buffer, barrier);
+ return barrier;
+}
+
+status_t GLESRenderEngine::cacheExternalTextureBufferInternal(const sp<GraphicBuffer>& buffer) {
+ if (buffer == nullptr) {
+ return BAD_VALUE;
+ }
+
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ if (mImageCache.count(buffer->getId()) > 0) {
+ // If there's already an image then fail fast here.
+ return NO_ERROR;
+ }
+ }
+ ATRACE_CALL();
+
+ // Create the image without holding a lock so that we don't block anything.
+ std::unique_ptr<Image> newImage = createImage();
+
+ bool created = newImage->setNativeWindowBuffer(buffer->getNativeBuffer(),
+ buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
+ if (!created) {
+ ALOGE("Failed to create image. size=%ux%u st=%u usage=%#" PRIx64 " fmt=%d",
+ buffer->getWidth(), buffer->getHeight(), buffer->getStride(), buffer->getUsage(),
+ buffer->getPixelFormat());
+ return NO_INIT;
+ }
+
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ if (mImageCache.count(buffer->getId()) > 0) {
+ // In theory it's possible for another thread to recache the image,
+ // so bail out if another thread won.
+ return NO_ERROR;
+ }
+ mImageCache.insert(std::make_pair(buffer->getId(), std::move(newImage)));
+ }
+
+ return NO_ERROR;
+}
+
+void GLESRenderEngine::unbindExternalTextureBuffer(uint64_t bufferId) {
+ mImageManager->releaseAsync(bufferId, nullptr);
+}
+
+std::shared_ptr<ImageManager::Barrier> GLESRenderEngine::unbindExternalTextureBufferForTesting(
+ uint64_t bufferId) {
+ auto barrier = std::make_shared<ImageManager::Barrier>();
+ mImageManager->releaseAsync(bufferId, barrier);
+ return barrier;
+}
+
+void GLESRenderEngine::unbindExternalTextureBufferInternal(uint64_t bufferId) {
+ std::unique_ptr<Image> image;
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ const auto& cachedImage = mImageCache.find(bufferId);
+
+ if (cachedImage != mImageCache.end()) {
+ ALOGV("Destroying image for buffer: %" PRIu64, bufferId);
+ // Move the buffer out of cache first, so that we can destroy
+ // without holding the cache's lock.
+ image = std::move(cachedImage->second);
+ mImageCache.erase(bufferId);
+ return;
+ }
+ }
+ ALOGV("Failed to find image for buffer: %" PRIu64, bufferId);
+}
+
+FloatRect GLESRenderEngine::setupLayerCropping(const LayerSettings& layer, Mesh& mesh) {
+ // Translate win by the rounded corners rect coordinates, to have all values in
+ // layer coordinate space.
+ FloatRect cropWin = layer.geometry.boundaries;
+ const FloatRect& roundedCornersCrop = layer.geometry.roundedCornersCrop;
+ cropWin.left -= roundedCornersCrop.left;
+ cropWin.right -= roundedCornersCrop.left;
+ cropWin.top -= roundedCornersCrop.top;
+ cropWin.bottom -= roundedCornersCrop.top;
+ Mesh::VertexArray<vec2> cropCoords(mesh.getCropCoordArray<vec2>());
+ cropCoords[0] = vec2(cropWin.left, cropWin.top);
+ cropCoords[1] = vec2(cropWin.left, cropWin.top + cropWin.getHeight());
+ cropCoords[2] = vec2(cropWin.right, cropWin.top + cropWin.getHeight());
+ cropCoords[3] = vec2(cropWin.right, cropWin.top);
+
+ setupCornerRadiusCropSize(roundedCornersCrop.getWidth(), roundedCornersCrop.getHeight());
+ return cropWin;
+}
+
+void GLESRenderEngine::handleRoundedCorners(const DisplaySettings& display,
+ const LayerSettings& layer, const Mesh& mesh) {
+ // We separate the layer into 3 parts essentially, such that we only turn on blending for the
+ // top rectangle and the bottom rectangle, and turn off blending for the middle rectangle.
+ FloatRect bounds = layer.geometry.roundedCornersCrop;
+
+ // Explicitly compute the transform from the clip rectangle to the physical
+ // display. Normally, this is done in glViewport but we explicitly compute
+ // it here so that we can get the scissor bounds correct.
+ const Rect& source = display.clip;
+ const Rect& destination = display.physicalDisplay;
+ // Here we compute the following transform:
+ // 1. Translate the top left corner of the source clip to (0, 0)
+ // 2. Rotate the clip rectangle about the origin in accordance with the
+ // orientation flag
+ // 3. Translate the top left corner back to the origin.
+ // 4. Scale the clip rectangle to the destination rectangle dimensions
+ // 5. Translate the top left corner to the destination rectangle's top left
+ // corner.
+ const mat4 translateSource = mat4::translate(vec4(-source.left, -source.top, 0, 1));
+ mat4 rotation;
+ int displacementX = 0;
+ int displacementY = 0;
+ float destinationWidth = static_cast<float>(destination.getWidth());
+ float destinationHeight = static_cast<float>(destination.getHeight());
+ float sourceWidth = static_cast<float>(source.getWidth());
+ float sourceHeight = static_cast<float>(source.getHeight());
+ const float rot90InRadians = 2.0f * static_cast<float>(M_PI) / 4.0f;
+ switch (display.orientation) {
+ case ui::Transform::ROT_90:
+ rotation = mat4::rotate(rot90InRadians, vec3(0, 0, 1));
+ displacementX = source.getHeight();
+ std::swap(sourceHeight, sourceWidth);
+ break;
+ case ui::Transform::ROT_180:
+ rotation = mat4::rotate(rot90InRadians * 2.0f, vec3(0, 0, 1));
+ displacementY = source.getHeight();
+ displacementX = source.getWidth();
+ break;
+ case ui::Transform::ROT_270:
+ rotation = mat4::rotate(rot90InRadians * 3.0f, vec3(0, 0, 1));
+ displacementY = source.getWidth();
+ std::swap(sourceHeight, sourceWidth);
+ break;
+ default:
+ break;
+ }
+
+ const mat4 intermediateTranslation = mat4::translate(vec4(displacementX, displacementY, 0, 1));
+ const mat4 scale = mat4::scale(
+ vec4(destinationWidth / sourceWidth, destinationHeight / sourceHeight, 1, 1));
+ const mat4 translateDestination =
+ mat4::translate(vec4(destination.left, destination.top, 0, 1));
+ const mat4 globalTransform =
+ translateDestination * scale * intermediateTranslation * rotation * translateSource;
+
+ const mat4 transformMatrix = globalTransform * layer.geometry.positionTransform;
+ const vec4 leftTopCoordinate(bounds.left, bounds.top, 1.0, 1.0);
+ const vec4 rightBottomCoordinate(bounds.right, bounds.bottom, 1.0, 1.0);
+ const vec4 leftTopCoordinateInBuffer = transformMatrix * leftTopCoordinate;
+ const vec4 rightBottomCoordinateInBuffer = transformMatrix * rightBottomCoordinate;
+ bounds = FloatRect(std::min(leftTopCoordinateInBuffer[0], rightBottomCoordinateInBuffer[0]),
+ std::min(leftTopCoordinateInBuffer[1], rightBottomCoordinateInBuffer[1]),
+ std::max(leftTopCoordinateInBuffer[0], rightBottomCoordinateInBuffer[0]),
+ std::max(leftTopCoordinateInBuffer[1], rightBottomCoordinateInBuffer[1]));
+
+ // Finally, we cut the layer into 3 parts, with top and bottom parts having rounded corners
+ // and the middle part without rounded corners.
+ const int32_t radius = ceil(layer.geometry.roundedCornersRadius);
+ const Rect topRect(bounds.left, bounds.top, bounds.right, bounds.top + radius);
+ setScissor(topRect);
+ drawMesh(mesh);
+ const Rect bottomRect(bounds.left, bounds.bottom - radius, bounds.right, bounds.bottom);
+ setScissor(bottomRect);
+ drawMesh(mesh);
+
+ // The middle part of the layer can turn off blending.
+ if (topRect.bottom < bottomRect.top) {
+ const Rect middleRect(bounds.left, bounds.top + radius, bounds.right,
+ bounds.bottom - radius);
+ setScissor(middleRect);
+ mState.cornerRadius = 0.0;
+ disableBlending();
+ drawMesh(mesh);
+ }
+ disableScissor();
+}
+
+status_t GLESRenderEngine::bindFrameBuffer(Framebuffer* framebuffer) {
+ ATRACE_CALL();
+ GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(framebuffer);
+ EGLImageKHR eglImage = glFramebuffer->getEGLImage();
+ uint32_t textureName = glFramebuffer->getTextureName();
+ uint32_t framebufferName = glFramebuffer->getFramebufferName();
+
+ // Bind the texture and turn our EGLImage into a texture
+ glBindTexture(GL_TEXTURE_2D, textureName);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)eglImage);
+
+ // Bind the Framebuffer to render into
+ glBindFramebuffer(GL_FRAMEBUFFER, framebufferName);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureName, 0);
+
+ uint32_t glStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+ ALOGE_IF(glStatus != GL_FRAMEBUFFER_COMPLETE_OES, "glCheckFramebufferStatusOES error %d",
+ glStatus);
+
+ return glStatus == GL_FRAMEBUFFER_COMPLETE_OES ? NO_ERROR : BAD_VALUE;
+}
+
+void GLESRenderEngine::unbindFrameBuffer(Framebuffer* /*framebuffer*/) {
+ ATRACE_CALL();
+
+ // back to main framebuffer
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+}
+
+bool GLESRenderEngine::cleanupPostRender(CleanupMode mode) {
+ ATRACE_CALL();
+
+ if (mPriorResourcesCleaned ||
+ (mLastDrawFence != nullptr && mLastDrawFence->getStatus() != Fence::Status::Signaled)) {
+ // If there's no prior frame needing cleanup, or the last frame's fence hasn't
+ // signaled yet, then don't do anything.
+ return false;
+ }
+
+ // This is a bit of a band-aid fix for FrameCaptureProcessor: we should not
+ // keep memory around if we don't need it.
+ if (mode == CleanupMode::CLEAN_ALL) {
+ // TODO: SurfaceFlinger memory utilization may benefit from resetting
+ // texture bindings as well. Assess if it does and there's no performance regression
+ // when rebinding the same image data to the same texture, and if so then its mode
+ // behavior can be tweaked.
+ if (mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+ for (auto [textureName, bufferId] : mTextureView) {
+ if (bufferId && mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureName);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES,
+ static_cast<GLeglImageOES>(mPlaceholderImage));
+ mTextureView[textureName] = std::nullopt;
+ checkErrors();
+ }
+ }
+ }
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ mImageCache.clear();
+ }
+ }
+
+ // Bind the texture to placeholder so that backing image data can be freed.
+ GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(getFramebufferForDrawing());
+ glFramebuffer->allocateBuffers(1, 1, mPlaceholderDrawBuffer);
+ // Release the cached fence here, so that we don't churn reallocations when
+ // we could no-op repeated calls of this method instead.
+ mLastDrawFence = nullptr;
+ mPriorResourcesCleaned = true;
+ return true;
+}
+
+void GLESRenderEngine::checkErrors() const {
+ checkErrors(nullptr);
+}
+
+void GLESRenderEngine::checkErrors(const char* tag) const {
+ do {
+ // there could be more than one error flag
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) break;
+ if (tag == nullptr) {
+ ALOGE("GL error 0x%04x", int(error));
+ } else {
+ ALOGE("GL error: %s -> 0x%04x", tag, int(error));
+ }
+ } while (true);
+}
+
+bool GLESRenderEngine::supportsProtectedContent() const {
+ return mProtectedEGLContext != EGL_NO_CONTEXT;
+}
+
+bool GLESRenderEngine::useProtectedContext(bool useProtectedContext) {
+ if (useProtectedContext == mInProtectedContext) {
+ return true;
+ }
+ if (useProtectedContext && mProtectedEGLContext == EGL_NO_CONTEXT) {
+ return false;
+ }
+ const EGLSurface surface = useProtectedContext ? mProtectedStubSurface : mStubSurface;
+ const EGLContext context = useProtectedContext ? mProtectedEGLContext : mEGLContext;
+ const bool success = eglMakeCurrent(mEGLDisplay, surface, surface, context) == EGL_TRUE;
+ if (success) {
+ mInProtectedContext = useProtectedContext;
+ }
+ return success;
+}
+EGLImageKHR GLESRenderEngine::createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer,
+ bool isProtected,
+ bool useFramebufferCache) {
+ sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(nativeBuffer);
+ if (useFramebufferCache) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ for (const auto& image : mFramebufferImageCache) {
+ if (image.first == graphicBuffer->getId()) {
+ return image.second;
+ }
+ }
+ }
+ EGLint attributes[] = {
+ isProtected ? EGL_PROTECTED_CONTENT_EXT : EGL_NONE,
+ isProtected ? EGL_TRUE : EGL_NONE,
+ EGL_NONE,
+ };
+ EGLImageKHR image = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ nativeBuffer, attributes);
+ if (useFramebufferCache) {
+ if (image != EGL_NO_IMAGE_KHR) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ if (mFramebufferImageCache.size() >= mFramebufferImageCacheSize) {
+ EGLImageKHR expired = mFramebufferImageCache.front().second;
+ mFramebufferImageCache.pop_front();
+ eglDestroyImageKHR(mEGLDisplay, expired);
+ DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+ }
+ mFramebufferImageCache.push_back({graphicBuffer->getId(), image});
+ }
+ }
+
+ if (image != EGL_NO_IMAGE_KHR) {
+ DEBUG_EGL_IMAGE_TRACKER_CREATE();
+ }
+ return image;
+}
+
+status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer,
+ const bool useFramebufferCache, base::unique_fd&& bufferFence,
+ base::unique_fd* drawFence) {
+ ATRACE_CALL();
+ if (layers.empty()) {
+ ALOGV("Drawing empty layer stack");
+ return NO_ERROR;
+ }
+
+ if (bufferFence.get() >= 0) {
+ // Duplicate the fence for passing to waitFence.
+ base::unique_fd bufferFenceDup(dup(bufferFence.get()));
+ if (bufferFenceDup < 0 || !waitFence(std::move(bufferFenceDup))) {
+ ATRACE_NAME("Waiting before draw");
+ sync_wait(bufferFence.get(), -1);
+ }
+ }
+
+ if (buffer == nullptr) {
+ ALOGE("No output buffer provided. Aborting GPU composition.");
+ return BAD_VALUE;
+ }
+
+ std::unique_ptr<BindNativeBufferAsFramebuffer> fbo;
+ // Gathering layers that requested blur, we'll need them to decide when to render to an
+ // offscreen buffer, and when to render to the native buffer.
+ std::deque<const LayerSettings*> blurLayers;
+ if (CC_LIKELY(mBlurFilter != nullptr)) {
+ for (auto layer : layers) {
+ if (layer->backgroundBlurRadius > 0) {
+ blurLayers.push_back(layer);
+ }
+ }
+ }
+ const auto blurLayersSize = blurLayers.size();
+
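+ // If nothing needs to be blurred, render directly into the output buffer;
+ // otherwise start by rendering into the blur filter's intermediate target.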
+ if (blurLayersSize == 0) {
+ fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this,
+ buffer.get()->getNativeBuffer(),
+ useFramebufferCache);
+ if (fbo->getStatus() != NO_ERROR) {
+ ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors();
+ return fbo->getStatus();
+ }
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ } else {
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ auto status =
+ mBlurFilter->setAsDrawTarget(display, blurLayers.front()->backgroundBlurRadius);
+ if (status != NO_ERROR) {
+ ALOGE("Failed to prepare blur filter! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors();
+ return status;
+ }
+ }
+
+ // Clear the entire buffer; when buffers are reused, ghost images from
+ // previous frames would otherwise persist.
+ // We also require a fully transparent framebuffer for overlays. This is
+ // probably not optimal on all GPUs, since we could filter out
+ // opaque layers.
+ clearWithColor(0.0, 0.0, 0.0, 0.0);
+
+ setOutputDataSpace(display.outputDataspace);
+ setDisplayMaxLuminance(display.maxLuminance);
+
+ const mat4 projectionMatrix =
+ ui::Transform(display.orientation).asMatrix4() * mState.projectionMatrix;
+ if (!display.clearRegion.isEmpty()) {
+ glDisable(GL_BLEND);
+ fillRegionWithColor(display.clearRegion, 0.0, 0.0, 0.0, 1.0);
+ }
+
+ Mesh mesh = Mesh::Builder()
+ .setPrimitive(Mesh::TRIANGLE_FAN)
+ .setVertices(4 /* count */, 2 /* size */)
+ .setTexCoords(2 /* size */)
+ .setCropCoords(2 /* size */)
+ .build();
+ for (auto const layer : layers) {
+ if (blurLayers.size() > 0 && blurLayers.front() == layer) {
+ blurLayers.pop_front();
+
+ auto status = mBlurFilter->prepare();
+ if (status != NO_ERROR) {
+ ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't render first blur pass");
+ return status;
+ }
+
+ if (blurLayers.size() == 0) {
+ // Done blurring, time to bind the native FBO and render our blur onto it.
+ fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this,
+ buffer.get()
+ ->getNativeBuffer(),
+ useFramebufferCache);
+ status = fbo->getStatus();
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ } else {
+ // There's still something else to blur, so let's keep rendering to our FBO
+ // instead of to the display.
+ status = mBlurFilter->setAsDrawTarget(display,
+ blurLayers.front()->backgroundBlurRadius);
+ }
+ if (status != NO_ERROR) {
+ ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't bind native framebuffer");
+ return status;
+ }
+
+ status = mBlurFilter->render(blurLayersSize > 1);
+ if (status != NO_ERROR) {
+ ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't render blur filter");
+ return status;
+ }
+ }
+
+ mState.maxMasteringLuminance = layer->source.buffer.maxMasteringLuminance;
+ mState.maxContentLuminance = layer->source.buffer.maxContentLuminance;
+ mState.projectionMatrix = projectionMatrix * layer->geometry.positionTransform;
+
+ const FloatRect bounds = layer->geometry.boundaries;
+ Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
+ position[0] = vec2(bounds.left, bounds.top);
+ position[1] = vec2(bounds.left, bounds.bottom);
+ position[2] = vec2(bounds.right, bounds.bottom);
+ position[3] = vec2(bounds.right, bounds.top);
+
+ setupLayerCropping(*layer, mesh);
+ setColorTransform(display.colorTransform * layer->colorTransform);
+
+ bool usePremultipliedAlpha = true;
+ bool disableTexture = true;
+ bool isOpaque = false;
+ if (layer->source.buffer.buffer != nullptr) {
+ disableTexture = false;
+ isOpaque = layer->source.buffer.isOpaque;
+
+ sp<GraphicBuffer> gBuf = layer->source.buffer.buffer;
+ bindExternalTextureBuffer(layer->source.buffer.textureName, gBuf,
+ layer->source.buffer.fence);
+
+ usePremultipliedAlpha = layer->source.buffer.usePremultipliedAlpha;
+ Texture texture(Texture::TEXTURE_EXTERNAL, layer->source.buffer.textureName);
+ mat4 texMatrix = layer->source.buffer.textureTransform;
+
+ texture.setMatrix(texMatrix.asArray());
+ texture.setFiltering(layer->source.buffer.useTextureFiltering);
+
+ texture.setDimensions(gBuf->getWidth(), gBuf->getHeight());
+ setSourceY410BT2020(layer->source.buffer.isY410BT2020);
+
+ renderengine::Mesh::VertexArray<vec2> texCoords(mesh.getTexCoordArray<vec2>());
+ texCoords[0] = vec2(0.0, 0.0);
+ texCoords[1] = vec2(0.0, 1.0);
+ texCoords[2] = vec2(1.0, 1.0);
+ texCoords[3] = vec2(1.0, 0.0);
+ setupLayerTexturing(texture);
+ }
+
+ const half3 solidColor = layer->source.solidColor;
+ const half4 color = half4(solidColor.r, solidColor.g, solidColor.b, layer->alpha);
+ // Buffer sources will have a black solid color ignored in the shader,
+ // so in that scenario the solid color passed here is arbitrary.
+ setupLayerBlending(usePremultipliedAlpha, isOpaque, disableTexture, color,
+ layer->geometry.roundedCornersRadius);
+ if (layer->disableBlending) {
+ glDisable(GL_BLEND);
+ }
+ setSourceDataSpace(layer->sourceDataspace);
+
+ if (layer->shadow.length > 0.0f) {
+ handleShadow(layer->geometry.boundaries, layer->geometry.roundedCornersRadius,
+ layer->shadow);
+ }
+ // We only want to do a special handling for rounded corners when having rounded corners
+ // is the only reason it needs to turn on blending, otherwise, we handle it like the
+ // usual way since it needs to turn on blending anyway.
+ else if (layer->geometry.roundedCornersRadius > 0.0 && color.a >= 1.0f && isOpaque) {
+ handleRoundedCorners(display, *layer, mesh);
+ } else {
+ drawMesh(mesh);
+ }
+
+ // Cleanup if there's a buffer source
+ if (layer->source.buffer.buffer != nullptr) {
+ disableBlending();
+ setSourceY410BT2020(false);
+ disableTexturing();
+ }
+ }
+
+ if (drawFence != nullptr) {
+ *drawFence = flush();
+ }
+ // If flush failed or we don't support native fences, we need to force the
+ // gl command stream to be executed.
+ if (drawFence == nullptr || drawFence->get() < 0) {
+ bool success = finish();
+ if (!success) {
+ ALOGE("Failed to flush RenderEngine commands");
+ checkErrors();
+ // Chances are, something illegal happened (either the caller passed
+ // us bad parameters, or we messed up our shader generation).
+ return INVALID_OPERATION;
+ }
+ mLastDrawFence = nullptr;
+ } else {
+ // The caller takes ownership of drawFence, so we need to duplicate the
+ // fd here.
+ mLastDrawFence = new Fence(dup(drawFence->get()));
+ }
+ mPriorResourcesCleaned = false;
+
+ checkErrors();
+ return NO_ERROR;
+}
+
+void GLESRenderEngine::setViewportAndProjection(Rect viewport, Rect clip) {
+ ATRACE_CALL();
+ mVpWidth = viewport.getWidth();
+ mVpHeight = viewport.getHeight();
+
+ // We pass the top left corner instead of the bottom left corner
+ // because we're rendering off-screen first.
+ glViewport(viewport.left, viewport.top, mVpWidth, mVpHeight);
+
+ mState.projectionMatrix = mat4::ortho(clip.left, clip.right, clip.top, clip.bottom, 0, 1);
+}
+
+void GLESRenderEngine::setupLayerBlending(bool premultipliedAlpha, bool opaque, bool disableTexture,
+ const half4& color, float cornerRadius) {
+ mState.isPremultipliedAlpha = premultipliedAlpha;
+ mState.isOpaque = opaque;
+ mState.color = color;
+ mState.cornerRadius = cornerRadius;
+
+ if (disableTexture) {
+ mState.textureEnabled = false;
+ }
+
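+ // Blending is needed when the layer is translucent, its content is not opaque,
+ // or it has rounded corners.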
+ if (color.a < 1.0f || !opaque || cornerRadius > 0.0f) {
+ glEnable(GL_BLEND);
+ glBlendFunc(premultipliedAlpha ? GL_ONE : GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+ } else {
+ glDisable(GL_BLEND);
+ }
+}
+
+void GLESRenderEngine::setSourceY410BT2020(bool enable) {
+ mState.isY410BT2020 = enable;
+}
+
+void GLESRenderEngine::setSourceDataSpace(Dataspace source) {
+ mDataSpace = source;
+}
+
+void GLESRenderEngine::setOutputDataSpace(Dataspace dataspace) {
+ mOutputDataSpace = dataspace;
+}
+
+void GLESRenderEngine::setDisplayMaxLuminance(const float maxLuminance) {
+ mState.displayMaxLuminance = maxLuminance;
+}
+
+void GLESRenderEngine::setupLayerTexturing(const Texture& texture) {
+ GLuint target = texture.getTextureTarget();
+ glBindTexture(target, texture.getTextureName());
+ GLenum filter = GL_NEAREST;
+ if (texture.getFiltering()) {
+ filter = GL_LINEAR;
+ }
+ glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, filter);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, filter);
+
+ mState.texture = texture;
+ mState.textureEnabled = true;
+}
+
+void GLESRenderEngine::setColorTransform(const mat4& colorTransform) {
+ mState.colorMatrix = colorTransform;
+}
+
+void GLESRenderEngine::disableTexturing() {
+ mState.textureEnabled = false;
+}
+
+void GLESRenderEngine::disableBlending() {
+ glDisable(GL_BLEND);
+}
+
+void GLESRenderEngine::setupFillWithColor(float r, float g, float b, float a) {
+ mState.isPremultipliedAlpha = true;
+ mState.isOpaque = false;
+ mState.color = half4(r, g, b, a);
+ mState.textureEnabled = false;
+ glDisable(GL_BLEND);
+}
+
+void GLESRenderEngine::setupCornerRadiusCropSize(float width, float height) {
+ mState.cropSize = half2(width, height);
+}
+
+void GLESRenderEngine::drawMesh(const Mesh& mesh) {
+ ATRACE_CALL();
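+ // Set up the vertex streams (positions, plus texture coords, crop coords and
+ // shadow attributes when present) before selecting and using a program.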
+ if (mesh.getTexCoordsSize()) {
+ glEnableVertexAttribArray(Program::texCoords);
+ glVertexAttribPointer(Program::texCoords, mesh.getTexCoordsSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getTexCoords());
+ }
+
+ glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getPositions());
+
+ if (mState.cornerRadius > 0.0f) {
+ glEnableVertexAttribArray(Program::cropCoords);
+ glVertexAttribPointer(Program::cropCoords, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getCropCoords());
+ }
+
+ if (mState.drawShadows) {
+ glEnableVertexAttribArray(Program::shadowColor);
+ glVertexAttribPointer(Program::shadowColor, mesh.getShadowColorSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getShadowColor());
+
+ glEnableVertexAttribArray(Program::shadowParams);
+ glVertexAttribPointer(Program::shadowParams, mesh.getShadowParamsSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getShadowParams());
+ }
+
+ Description managedState = mState;
+ // By default, DISPLAY_P3 is the only supported wide color output. However,
+ // when HDR content is present, hardware composer may be able to handle
+ // BT2020 data space, in that case, the output data space is set to be
+ // BT2020_HLG or BT2020_PQ respectively. In the GPU fallback path we need
+ // to respect this and convert non-HDR content to HDR format.
+ if (mUseColorManagement) {
+ Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
+ Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+ Dataspace outputStandard =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::STANDARD_MASK);
+ Dataspace outputTransfer =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
+ bool needsXYZConversion = needsXYZTransformMatrix();
+
+ // NOTE: if the input standard of the input dataspace is not STANDARD_DCI_P3 or
+ // STANDARD_BT2020, it will be treated as STANDARD_BT709
+ if (inputStandard != Dataspace::STANDARD_DCI_P3 &&
+ inputStandard != Dataspace::STANDARD_BT2020) {
+ inputStandard = Dataspace::STANDARD_BT709;
+ }
+
+ if (needsXYZConversion) {
+ // The supported input color spaces are standard RGB, Display P3 and BT2020.
+ switch (inputStandard) {
+ case Dataspace::STANDARD_DCI_P3:
+ managedState.inputTransformMatrix = mDisplayP3ToXyz;
+ break;
+ case Dataspace::STANDARD_BT2020:
+ managedState.inputTransformMatrix = mBt2020ToXyz;
+ break;
+ default:
+ managedState.inputTransformMatrix = mSrgbToXyz;
+ break;
+ }
+
+ // The supported output color spaces are BT2020, Display P3 and standard RGB.
+ switch (outputStandard) {
+ case Dataspace::STANDARD_BT2020:
+ managedState.outputTransformMatrix = mXyzToBt2020;
+ break;
+ case Dataspace::STANDARD_DCI_P3:
+ managedState.outputTransformMatrix = mXyzToDisplayP3;
+ break;
+ default:
+ managedState.outputTransformMatrix = mXyzToSrgb;
+ break;
+ }
+ } else if (inputStandard != outputStandard) {
+ // At this point, the input data space and output data space could be both
+ // HDR data spaces, but they match each other, we do nothing in this case.
+ // In addition to the case above, the input data space could be
+ // - scRGB linear
+ // - scRGB non-linear
+ // - sRGB
+ // - Display P3
+ // - BT2020
+ // The output data spaces could be
+ // - sRGB
+ // - Display P3
+ // - BT2020
+ switch (outputStandard) {
+ case Dataspace::STANDARD_BT2020:
+ if (inputStandard == Dataspace::STANDARD_BT709) {
+ managedState.outputTransformMatrix = mSrgbToBt2020;
+ } else if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+ managedState.outputTransformMatrix = mDisplayP3ToBt2020;
+ }
+ break;
+ case Dataspace::STANDARD_DCI_P3:
+ if (inputStandard == Dataspace::STANDARD_BT709) {
+ managedState.outputTransformMatrix = mSrgbToDisplayP3;
+ } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+ managedState.outputTransformMatrix = mBt2020ToDisplayP3;
+ }
+ break;
+ default:
+ if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+ managedState.outputTransformMatrix = mDisplayP3ToSrgb;
+ } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+ managedState.outputTransformMatrix = mBt2020ToSrgb;
+ }
+ break;
+ }
+ }
+
+ // we need to convert the RGB value to linear space and convert it back when:
+ // - there is a color matrix that is not an identity matrix, or
+ // - there is an output transform matrix that is not an identity matrix, or
+ // - the input transfer function doesn't match the output transfer function.
+ if (managedState.hasColorMatrix() || managedState.hasOutputTransformMatrix() ||
+ inputTransfer != outputTransfer) {
+ managedState.inputTransferFunction =
+ Description::dataSpaceToTransferFunction(inputTransfer);
+ managedState.outputTransferFunction =
+ Description::dataSpaceToTransferFunction(outputTransfer);
+ }
+ }
+
+ ProgramCache::getInstance().useProgram(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
+ managedState);
+
+ if (mState.drawShadows) {
+ glDrawElements(mesh.getPrimitive(), mesh.getIndexCount(), GL_UNSIGNED_SHORT,
+ mesh.getIndices());
+ } else {
+ glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
+ }
+
+ if (mUseColorManagement && outputDebugPPMs) {
+ static uint64_t managedColorFrameCount = 0;
+ std::ostringstream out;
+ out << "/data/texture_out" << managedColorFrameCount++;
+ writePPM(out.str().c_str(), mVpWidth, mVpHeight);
+ }
+
+ if (mesh.getTexCoordsSize()) {
+ glDisableVertexAttribArray(Program::texCoords);
+ }
+
+ if (mState.cornerRadius > 0.0f) {
+ glDisableVertexAttribArray(Program::cropCoords);
+ }
+
+ if (mState.drawShadows) {
+ glDisableVertexAttribArray(Program::shadowColor);
+ glDisableVertexAttribArray(Program::shadowParams);
+ }
+}
+
+size_t GLESRenderEngine::getMaxTextureSize() const {
+ return mMaxTextureSize;
+}
+
+size_t GLESRenderEngine::getMaxViewportDims() const {
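+ // GL_MAX_VIEWPORT_DIMS reports a width and a height; return the smaller of the two.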
+ return mMaxViewportDims[0] < mMaxViewportDims[1] ? mMaxViewportDims[0] : mMaxViewportDims[1];
+}
+
+void GLESRenderEngine::dump(std::string& result) {
+ const GLExtensions& extensions = GLExtensions::getInstance();
+ ProgramCache& cache = ProgramCache::getInstance();
+
+ StringAppendF(&result, "EGL implementation : %s\n", extensions.getEGLVersion());
+ StringAppendF(&result, "%s\n", extensions.getEGLExtensions());
+ StringAppendF(&result, "GLES: %s, %s, %s\n", extensions.getVendor(), extensions.getRenderer(),
+ extensions.getVersion());
+ StringAppendF(&result, "%s\n", extensions.getExtensions());
+ StringAppendF(&result, "RenderEngine supports protected context: %d\n",
+ supportsProtectedContent());
+ StringAppendF(&result, "RenderEngine is in protected context: %d\n", mInProtectedContext);
+ StringAppendF(&result, "RenderEngine program cache size for unprotected context: %zu\n",
+ cache.getSize(mEGLContext));
+ StringAppendF(&result, "RenderEngine program cache size for protected context: %zu\n",
+ cache.getSize(mProtectedEGLContext));
+ StringAppendF(&result, "RenderEngine last dataspace conversion: (%s) to (%s)\n",
+ dataspaceDetails(static_cast<android_dataspace>(mDataSpace)).c_str(),
+ dataspaceDetails(static_cast<android_dataspace>(mOutputDataSpace)).c_str());
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ StringAppendF(&result, "RenderEngine image cache size: %zu\n", mImageCache.size());
+ StringAppendF(&result, "Dumping buffer ids...\n");
+ for (const auto& [id, unused] : mImageCache) {
+ StringAppendF(&result, "0x%" PRIx64 "\n", id);
+ }
+ }
+ {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ StringAppendF(&result, "RenderEngine framebuffer image cache size: %zu\n",
+ mFramebufferImageCache.size());
+ StringAppendF(&result, "Dumping buffer ids...\n");
+ for (const auto& [id, unused] : mFramebufferImageCache) {
+ StringAppendF(&result, "0x%" PRIx64 "\n", id);
+ }
+ }
+}
+
+GLESRenderEngine::GlesVersion GLESRenderEngine::parseGlesVersion(const char* str) {
+ int major, minor;
+ if (sscanf(str, "OpenGL ES-CM %d.%d", &major, &minor) != 2) {
+ if (sscanf(str, "OpenGL ES %d.%d", &major, &minor) != 2) {
+ ALOGW("Unable to parse GL_VERSION string: \"%s\"", str);
+ return GLES_VERSION_1_0;
+ }
+ }
+
+ if (major == 1 && minor == 0) return GLES_VERSION_1_0;
+ if (major == 1 && minor >= 1) return GLES_VERSION_1_1;
+ if (major == 2 && minor >= 0) return GLES_VERSION_2_0;
+ if (major == 3 && minor >= 0) return GLES_VERSION_3_0;
+
+ ALOGW("Unrecognized OpenGL ES version: %d.%d", major, minor);
+ return GLES_VERSION_1_0;
+}
+
+EGLContext GLESRenderEngine::createEglContext(EGLDisplay display, EGLConfig config,
+ EGLContext shareContext, bool useContextPriority,
+ Protection protection) {
+ EGLint renderableType = 0;
+ if (config == EGL_NO_CONFIG) {
+ renderableType = EGL_OPENGL_ES3_BIT;
+ } else if (!eglGetConfigAttrib(display, config, EGL_RENDERABLE_TYPE, &renderableType)) {
+ LOG_ALWAYS_FATAL("can't query EGLConfig RENDERABLE_TYPE");
+ }
+ EGLint contextClientVersion = 0;
+ if (renderableType & EGL_OPENGL_ES3_BIT) {
+ contextClientVersion = 3;
+ } else if (renderableType & EGL_OPENGL_ES2_BIT) {
+ contextClientVersion = 2;
+ } else if (renderableType & EGL_OPENGL_ES_BIT) {
+ contextClientVersion = 1;
+ } else {
+ LOG_ALWAYS_FATAL("no supported EGL_RENDERABLE_TYPEs");
+ }
+
+ std::vector<EGLint> contextAttributes;
+ contextAttributes.reserve(7);
+ contextAttributes.push_back(EGL_CONTEXT_CLIENT_VERSION);
+ contextAttributes.push_back(contextClientVersion);
+ if (useContextPriority) {
+ contextAttributes.push_back(EGL_CONTEXT_PRIORITY_LEVEL_IMG);
+ contextAttributes.push_back(EGL_CONTEXT_PRIORITY_HIGH_IMG);
+ }
+ if (protection == Protection::PROTECTED) {
+ contextAttributes.push_back(EGL_PROTECTED_CONTENT_EXT);
+ contextAttributes.push_back(EGL_TRUE);
+ }
+ contextAttributes.push_back(EGL_NONE);
+
+ EGLContext context = eglCreateContext(display, config, shareContext, contextAttributes.data());
+
+ if (contextClientVersion == 3 && context == EGL_NO_CONTEXT) {
+ // eglGetConfigAttrib indicated we could create a GLES 3 context, but creation
+ // failed anyway, so return EGL_NO_CONTEXT and let the caller abort.
+ if (config != EGL_NO_CONFIG) {
+ return context;
+ }
+ // If |config| is EGL_NO_CONFIG, we speculatively try to create GLES 3 context, so we should
+ // try to fall back to GLES 2.
+ contextAttributes[1] = 2;
+ context = eglCreateContext(display, config, shareContext, contextAttributes.data());
+ }
+
+ return context;
+}
+
+EGLSurface GLESRenderEngine::createStubEglPbufferSurface(EGLDisplay display, EGLConfig config,
+ int hwcFormat, Protection protection) {
+ EGLConfig stubConfig = config;
+ if (stubConfig == EGL_NO_CONFIG) {
+ stubConfig = chooseEglConfig(display, hwcFormat, /*logConfig*/ true);
+ }
+ std::vector<EGLint> attributes;
+ attributes.reserve(7);
+ attributes.push_back(EGL_WIDTH);
+ attributes.push_back(1);
+ attributes.push_back(EGL_HEIGHT);
+ attributes.push_back(1);
+ if (protection == Protection::PROTECTED) {
+ attributes.push_back(EGL_PROTECTED_CONTENT_EXT);
+ attributes.push_back(EGL_TRUE);
+ }
+ attributes.push_back(EGL_NONE);
+
+ return eglCreatePbufferSurface(display, stubConfig, attributes.data());
+}
+
+bool GLESRenderEngine::isHdrDataSpace(const Dataspace dataSpace) const {
+ const Dataspace standard = static_cast<Dataspace>(dataSpace & Dataspace::STANDARD_MASK);
+ const Dataspace transfer = static_cast<Dataspace>(dataSpace & Dataspace::TRANSFER_MASK);
+ return standard == Dataspace::STANDARD_BT2020 &&
+ (transfer == Dataspace::TRANSFER_ST2084 || transfer == Dataspace::TRANSFER_HLG);
+}
+
+// For convenience, we want to convert the input color space to the XYZ color space first,
+// and then convert from XYZ to the output color space, when
+// - SDR and HDR contents are mixed, so either SDR content is converted to HDR or
+// HDR content is tone-mapped to SDR; or
+// - HDR PQ and HLG contents are presented at the same time, and we want to convert
+// HLG content to PQ content.
+// In either case we need to operate on the Y value in the XYZ color space. Thus, when either
+// the input or the output data space is an HDR data space, and the input transfer function
+// doesn't match the output transfer function, we enable an intermediate transform to
+// the XYZ color space.
+bool GLESRenderEngine::needsXYZTransformMatrix() const {
+ const bool isInputHdrDataSpace = isHdrDataSpace(mDataSpace);
+ const bool isOutputHdrDataSpace = isHdrDataSpace(mOutputDataSpace);
+ const Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+ const Dataspace outputTransfer =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
+
+ return (isInputHdrDataSpace || isOutputHdrDataSpace) && inputTransfer != outputTransfer;
+}
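+
+// Worked example (illustrative): with an HDR PQ input (STANDARD bits BT2020, TRANSFER bits
+// ST2084) and an SDR sRGB output (BT709 standard, SRGB transfer), the input is an HDR data
+// space and the transfer functions differ, so needsXYZTransformMatrix() returns true and the
+// intermediate XYZ transform is enabled. With sRGB in and sRGB out, neither side is HDR and
+// it returns false.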
+
+bool GLESRenderEngine::isImageCachedForTesting(uint64_t bufferId) {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ const auto& cachedImage = mImageCache.find(bufferId);
+ return cachedImage != mImageCache.end();
+}
+
+bool GLESRenderEngine::isTextureNameKnownForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end();
+}
+
+std::optional<uint64_t> GLESRenderEngine::getBufferIdForTextureNameForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end() ? entry->second : std::nullopt;
+}
+
+bool GLESRenderEngine::isFramebufferImageCachedForTesting(uint64_t bufferId) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ return std::any_of(mFramebufferImageCache.cbegin(), mFramebufferImageCache.cend(),
+ [=](std::pair<uint64_t, EGLImageKHR> image) {
+ return image.first == bufferId;
+ });
+}
+
+// FlushTracer implementation
+GLESRenderEngine::FlushTracer::FlushTracer(GLESRenderEngine* engine) : mEngine(engine) {
+ mThread = std::thread(&GLESRenderEngine::FlushTracer::loop, this);
+}
+
+GLESRenderEngine::FlushTracer::~FlushTracer() {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mRunning = false;
+ }
+ mCondition.notify_all();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+void GLESRenderEngine::FlushTracer::queueSync(EGLSyncKHR sync) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ char name[64];
+ const uint64_t frameNum = mFramesQueued++;
+ snprintf(name, sizeof(name), "Queueing sync for frame: %lu",
+ static_cast<unsigned long>(frameNum));
+ ATRACE_NAME(name);
+ mQueue.push({sync, frameNum});
+ ATRACE_INT("GPU Frames Outstanding", mQueue.size());
+ mCondition.notify_one();
+}
+
+void GLESRenderEngine::FlushTracer::loop() {
+ while (mRunning) {
+ QueueEntry entry;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ mCondition.wait(mMutex,
+ [&]() REQUIRES(mMutex) { return !mQueue.empty() || !mRunning; });
+
+ if (!mRunning) {
+ // if mRunning is false, then FlushTracer is being destroyed, so
+ // bail out now.
+ break;
+ }
+ entry = mQueue.front();
+ mQueue.pop();
+ }
+ {
+ char name[64];
+ snprintf(name, sizeof(name), "waiting for frame %lu",
+ static_cast<unsigned long>(entry.mFrameNum));
+ ATRACE_NAME(name);
+ mEngine->waitSync(entry.mSync, 0);
+ }
+ }
+}
+
+void GLESRenderEngine::handleShadow(const FloatRect& casterRect, float casterCornerRadius,
+ const ShadowSettings& settings) {
+ ATRACE_CALL();
+ const float casterZ = settings.length / 2.0f;
+ const GLShadowVertexGenerator shadows(casterRect, casterCornerRadius, casterZ,
+ settings.casterIsTranslucent, settings.ambientColor,
+ settings.spotColor, settings.lightPos,
+ settings.lightRadius);
+
+ // setup mesh for both shadows
+ Mesh mesh = Mesh::Builder()
+ .setPrimitive(Mesh::TRIANGLES)
+ .setVertices(shadows.getVertexCount(), 2 /* size */)
+ .setShadowAttrs()
+ .setIndices(shadows.getIndexCount())
+ .build();
+
+ Mesh::VertexArray<vec2> position = mesh.getPositionArray<vec2>();
+ Mesh::VertexArray<vec4> shadowColor = mesh.getShadowColorArray<vec4>();
+ Mesh::VertexArray<vec3> shadowParams = mesh.getShadowParamsArray<vec3>();
+ shadows.fillVertices(position, shadowColor, shadowParams);
+ shadows.fillIndices(mesh.getIndicesArray());
+
+ mState.cornerRadius = 0.0f;
+ mState.drawShadows = true;
+ setupLayerTexturing(mShadowTexture.getTexture());
+ drawMesh(mesh);
+ mState.drawShadows = false;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLESRenderEngine.h b/media/libstagefright/renderfright/gl/GLESRenderEngine.h
new file mode 100644
index 0000000..2c6eae2
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLESRenderEngine.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_GLESRENDERENGINE_H_
+#define SF_GLESRENDERENGINE_H_
+
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <unordered_map>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <android-base/thread_annotations.h>
+#include <renderengine/RenderEngine.h>
+#include <renderengine/private/Description.h>
+#include <sys/types.h>
+#include "GLShadowTexture.h"
+#include "ImageManager.h"
+
+#define EGL_NO_CONFIG ((EGLConfig)0)
+
+namespace android {
+
+namespace renderengine {
+
+class Mesh;
+class Texture;
+
+namespace gl {
+
+class GLImage;
+class BlurFilter;
+
+class GLESRenderEngine : public impl::RenderEngine {
+public:
+ static std::unique_ptr<GLESRenderEngine> create(const RenderEngineCreationArgs& args);
+
+ GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display, EGLConfig config,
+ EGLContext ctxt, EGLSurface stub, EGLContext protectedContext,
+ EGLSurface protectedStub);
+ ~GLESRenderEngine() override EXCLUDES(mRenderingMutex);
+
+ void primeCache() const override;
+ void genTextures(size_t count, uint32_t* names) override;
+ void deleteTextures(size_t count, uint32_t const* names) override;
+ void bindExternalTextureImage(uint32_t texName, const Image& image) override;
+ status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) EXCLUDES(mRenderingMutex);
+ void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) EXCLUDES(mRenderingMutex);
+ void unbindExternalTextureBuffer(uint64_t bufferId) EXCLUDES(mRenderingMutex);
+ status_t bindFrameBuffer(Framebuffer* framebuffer) override;
+ void unbindFrameBuffer(Framebuffer* framebuffer) override;
+
+ bool isProtected() const override { return mInProtectedContext; }
+ bool supportsProtectedContent() const override;
+ bool useProtectedContext(bool useProtectedContext) override;
+ status_t drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
+ base::unique_fd&& bufferFence, base::unique_fd* drawFence) override;
+ bool cleanupPostRender(CleanupMode mode) override;
+
+ EGLDisplay getEGLDisplay() const { return mEGLDisplay; }
+ // Creates an output image for rendering to
+ EGLImageKHR createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer, bool isProtected,
+ bool useFramebufferCache)
+ EXCLUDES(mFramebufferImageCacheMutex);
+
+ // Test-only methods
+ // Returns true iff mImageCache contains an image keyed by bufferId
+ bool isImageCachedForTesting(uint64_t bufferId) EXCLUDES(mRenderingMutex);
+ // Returns true iff texName was previously generated by RenderEngine and was
+ // not destroyed.
+ bool isTextureNameKnownForTesting(uint32_t texName);
+ // Returns the buffer ID of the content bound to texName, or nullopt if no
+ // such mapping exists.
+ std::optional<uint64_t> getBufferIdForTextureNameForTesting(uint32_t texName);
+ // Returns true iff mFramebufferImageCache contains an image keyed by bufferId
+ bool isFramebufferImageCachedForTesting(uint64_t bufferId)
+ EXCLUDES(mFramebufferImageCacheMutex);
+ // These are wrappers around public methods above, but exposing Barrier
+ // objects so that tests can block.
+ std::shared_ptr<ImageManager::Barrier> cacheExternalTextureBufferForTesting(
+ const sp<GraphicBuffer>& buffer);
+ std::shared_ptr<ImageManager::Barrier> unbindExternalTextureBufferForTesting(uint64_t bufferId);
+
+protected:
+ Framebuffer* getFramebufferForDrawing() override;
+ void dump(std::string& result) override EXCLUDES(mRenderingMutex)
+ EXCLUDES(mFramebufferImageCacheMutex);
+ size_t getMaxTextureSize() const override;
+ size_t getMaxViewportDims() const override;
+
+private:
+ enum GlesVersion {
+ GLES_VERSION_1_0 = 0x10000,
+ GLES_VERSION_1_1 = 0x10001,
+ GLES_VERSION_2_0 = 0x20000,
+ GLES_VERSION_3_0 = 0x30000,
+ };
+
+ static EGLConfig chooseEglConfig(EGLDisplay display, int format, bool logConfig);
+ static GlesVersion parseGlesVersion(const char* str);
+ static EGLContext createEglContext(EGLDisplay display, EGLConfig config,
+ EGLContext shareContext, bool useContextPriority,
+ Protection protection);
+ static EGLSurface createStubEglPbufferSurface(EGLDisplay display, EGLConfig config,
+ int hwcFormat, Protection protection);
+ std::unique_ptr<Framebuffer> createFramebuffer();
+ std::unique_ptr<Image> createImage();
+ void checkErrors() const;
+ void checkErrors(const char* tag) const;
+ void setScissor(const Rect& region);
+ void disableScissor();
+ bool waitSync(EGLSyncKHR sync, EGLint flags);
+ status_t cacheExternalTextureBufferInternal(const sp<GraphicBuffer>& buffer)
+ EXCLUDES(mRenderingMutex);
+ void unbindExternalTextureBufferInternal(uint64_t bufferId) EXCLUDES(mRenderingMutex);
+
+ // A data space is considered HDR data space if it has BT2020 color space
+ // with PQ or HLG transfer function.
+ bool isHdrDataSpace(const ui::Dataspace dataSpace) const;
+ bool needsXYZTransformMatrix() const;
+ // Defines the viewport, and sets the projection matrix to the projection
+ // defined by the clip.
+ void setViewportAndProjection(Rect viewport, Rect clip);
+ // Evicts stale images from the buffer cache.
+ void evictImages(const std::vector<LayerSettings>& layers);
+ // Computes the cropping window for the layer and sets up cropping
+ // coordinates for the mesh.
+ FloatRect setupLayerCropping(const LayerSettings& layer, Mesh& mesh);
+
+ // We handle rounded corners specially when it's possible to turn off blending for the
+ // majority of the layer. The rounded corners need blending enabled so that the alpha value
+ // can be set correctly; however, only the corners need this, and since blending is an
+ // expensive operation, we keep it disabled wherever it isn't necessary.
+ void handleRoundedCorners(const DisplaySettings& display, const LayerSettings& layer,
+ const Mesh& mesh);
+ base::unique_fd flush();
+ bool finish();
+ bool waitFence(base::unique_fd fenceFd);
+ void clearWithColor(float red, float green, float blue, float alpha);
+ void fillRegionWithColor(const Region& region, float red, float green, float blue, float alpha);
+ void handleShadow(const FloatRect& casterRect, float casterCornerRadius,
+ const ShadowSettings& shadowSettings);
+ void setupLayerBlending(bool premultipliedAlpha, bool opaque, bool disableTexture,
+ const half4& color, float cornerRadius);
+ void setupLayerTexturing(const Texture& texture);
+ void setupFillWithColor(float r, float g, float b, float a);
+ void setColorTransform(const mat4& colorTransform);
+ void disableTexturing();
+ void disableBlending();
+ void setupCornerRadiusCropSize(float width, float height);
+
+ // HDR and color management related functions and state
+ void setSourceY410BT2020(bool enable);
+ void setSourceDataSpace(ui::Dataspace source);
+ void setOutputDataSpace(ui::Dataspace dataspace);
+ void setDisplayMaxLuminance(const float maxLuminance);
+
+ // drawing
+ void drawMesh(const Mesh& mesh);
+
+ EGLDisplay mEGLDisplay;
+ EGLConfig mEGLConfig;
+ EGLContext mEGLContext;
+ EGLSurface mStubSurface;
+ EGLContext mProtectedEGLContext;
+ EGLSurface mProtectedStubSurface;
+ GLint mMaxViewportDims[2];
+ GLint mMaxTextureSize;
+ GLuint mVpWidth;
+ GLuint mVpHeight;
+ Description mState;
+ GLShadowTexture mShadowTexture;
+
+ mat4 mSrgbToXyz;
+ mat4 mDisplayP3ToXyz;
+ mat4 mBt2020ToXyz;
+ mat4 mXyzToSrgb;
+ mat4 mXyzToDisplayP3;
+ mat4 mXyzToBt2020;
+ mat4 mSrgbToDisplayP3;
+ mat4 mSrgbToBt2020;
+ mat4 mDisplayP3ToSrgb;
+ mat4 mDisplayP3ToBt2020;
+ mat4 mBt2020ToSrgb;
+ mat4 mBt2020ToDisplayP3;
+
+ bool mInProtectedContext = false;
+ // If set to true, then enables tracing flush() and finish() to systrace.
+ bool mTraceGpuCompletion = false;
+ // Maximum size of mFramebufferImageCache. If more images would be cached, then (approximately)
+ // the least recently used buffer should be kicked out.
+ uint32_t mFramebufferImageCacheSize = 0;
+
+ // Cache of output images, keyed by corresponding GraphicBuffer ID.
+ std::deque<std::pair<uint64_t, EGLImageKHR>> mFramebufferImageCache
+ GUARDED_BY(mFramebufferImageCacheMutex);
+ // The only reason why we have this mutex is so that we don't segfault when
+ // dumping info.
+ std::mutex mFramebufferImageCacheMutex;
+
+ // Current dataspace of layer being rendered
+ ui::Dataspace mDataSpace = ui::Dataspace::UNKNOWN;
+
+ // Current output dataspace of the render engine
+ ui::Dataspace mOutputDataSpace = ui::Dataspace::UNKNOWN;
+
+ // Whether the device supports color management; currently color management
+ // supports the sRGB and DisplayP3 color spaces.
+ const bool mUseColorManagement = false;
+
+ // Cache of GL images that we'll store per GraphicBuffer ID
+ std::unordered_map<uint64_t, std::unique_ptr<Image>> mImageCache GUARDED_BY(mRenderingMutex);
+ std::unordered_map<uint32_t, std::optional<uint64_t>> mTextureView;
+
+ // Mutex guarding rendering operations, so that:
+ // 1. GL operations aren't interleaved, and
+ // 2. Internal state related to rendering that is potentially modified by
+ // multiple threads is guaranteed thread-safe.
+ std::mutex mRenderingMutex;
+
+ std::unique_ptr<Framebuffer> mDrawingBuffer;
+ // this is a 1x1 RGB buffer, but over-allocate in case a driver wants more
+ // memory or if it needs to satisfy alignment requirements. In this case:
+ // assume that each channel requires 4 bytes, and add 3 additional bytes to
+ // ensure that we align on a word. Allocating 16 bytes will provide a
+ // guarantee that we don't clobber memory.
+ uint32_t mPlaceholderDrawBuffer[4];
+ // Placeholder buffer and image, similar to mPlaceholderDrawBuffer, but
+ // instead these are intended for cleaning up texture memory with the
+ // GL_TEXTURE_EXTERNAL_OES target.
+ ANativeWindowBuffer* mPlaceholderBuffer = nullptr;
+ EGLImage mPlaceholderImage = EGL_NO_IMAGE_KHR;
+ sp<Fence> mLastDrawFence;
+ // Store a separate boolean checking if prior resources were cleaned up, as
+ // devices that don't support native sync fences can't rely on a last draw
+ // fence that doesn't exist.
+ bool mPriorResourcesCleaned = true;
+
+ // Blur effect processor, only instantiated when a layer requests it.
+ BlurFilter* mBlurFilter = nullptr;
+
+ class FlushTracer {
+ public:
+ FlushTracer(GLESRenderEngine* engine);
+ ~FlushTracer();
+ void queueSync(EGLSyncKHR sync) EXCLUDES(mMutex);
+
+ struct QueueEntry {
+ EGLSyncKHR mSync = nullptr;
+ uint64_t mFrameNum = 0;
+ };
+
+ private:
+ void loop();
+ GLESRenderEngine* const mEngine;
+ std::thread mThread;
+ std::condition_variable_any mCondition;
+ std::mutex mMutex;
+ std::queue<QueueEntry> mQueue GUARDED_BY(mMutex);
+ uint64_t mFramesQueued GUARDED_BY(mMutex) = 0;
+ bool mRunning = true;
+ };
+ friend class FlushTracer;
+ friend class ImageManager;
+ friend class GLFramebuffer;
+ friend class BlurFilter;
+ friend class GenericProgram;
+ std::unique_ptr<FlushTracer> mFlushTracer;
+ std::unique_ptr<ImageManager> mImageManager = std::make_unique<ImageManager>(this);
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_GLESRENDERENGINE_H_ */
diff --git a/media/libstagefright/renderfright/gl/GLExtensions.cpp b/media/libstagefright/renderfright/gl/GLExtensions.cpp
new file mode 100644
index 0000000..2924b0e
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLExtensions.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GLExtensions.h"
+
+#include <string>
+#include <unordered_set>
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+ANDROID_SINGLETON_STATIC_INSTANCE(android::renderengine::gl::GLExtensions)
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+namespace {
+
+class ExtensionSet {
+public:
+ ExtensionSet(const char* extensions) {
+ char const* curr = extensions;
+ char const* head = curr;
+ do {
+ head = strchr(curr, ' ');
+ size_t len = head ? head - curr : strlen(curr);
+ if (len > 0) {
+ mExtensions.emplace(curr, len);
+ }
+ curr = head + 1;
+ } while (head);
+ }
+
+ bool hasExtension(const char* extension) const { return mExtensions.count(extension) > 0; }
+
+private:
+ std::unordered_set<std::string> mExtensions;
+};
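+
+// Usage sketch (illustrative): ExtensionSet("GL_EXT_protected_textures GL_OES_EGL_image")
+// splits the string on spaces, so hasExtension("GL_EXT_protected_textures") returns true while
+// hasExtension("GL_EXT_protected") returns false -- only whole tokens match.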
+
+} // anonymous namespace
+
+void GLExtensions::initWithGLStrings(GLubyte const* vendor, GLubyte const* renderer,
+ GLubyte const* version, GLubyte const* extensions) {
+ mVendor = (char const*)vendor;
+ mRenderer = (char const*)renderer;
+ mVersion = (char const*)version;
+ mExtensions = (char const*)extensions;
+
+ ExtensionSet extensionSet(mExtensions.c_str());
+ if (extensionSet.hasExtension("GL_EXT_protected_textures")) {
+ mHasProtectedTexture = true;
+ }
+}
+
+char const* GLExtensions::getVendor() const {
+ return mVendor.string();
+}
+
+char const* GLExtensions::getRenderer() const {
+ return mRenderer.string();
+}
+
+char const* GLExtensions::getVersion() const {
+ return mVersion.string();
+}
+
+char const* GLExtensions::getExtensions() const {
+ return mExtensions.string();
+}
+
+void GLExtensions::initWithEGLStrings(char const* eglVersion, char const* eglExtensions) {
+ mEGLVersion = eglVersion;
+ mEGLExtensions = eglExtensions;
+
+ ExtensionSet extensionSet(eglExtensions);
+
+ // EGL_ANDROIDX_no_config_context is an experimental extension with no
+ // written specification. It will be replaced by something more formal.
+ // SurfaceFlinger is using it to allow a single EGLContext to render to
+ // both a 16-bit primary display framebuffer and a 32-bit virtual display
+ // framebuffer.
+ //
+ // EGL_KHR_no_config_context is the official extension that allows creating a
+ // context that works with any surface of a display.
+ if (extensionSet.hasExtension("EGL_ANDROIDX_no_config_context") ||
+ extensionSet.hasExtension("EGL_KHR_no_config_context")) {
+ mHasNoConfigContext = true;
+ }
+
+ if (extensionSet.hasExtension("EGL_ANDROID_native_fence_sync")) {
+ mHasNativeFenceSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_fence_sync")) {
+ mHasFenceSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_wait_sync")) {
+ mHasWaitSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_EXT_protected_content")) {
+ mHasProtectedContent = true;
+ }
+ if (extensionSet.hasExtension("EGL_IMG_context_priority")) {
+ mHasContextPriority = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_surfaceless_context")) {
+ mHasSurfacelessContext = true;
+ }
+}
+
+char const* GLExtensions::getEGLVersion() const {
+ return mEGLVersion.string();
+}
+
+char const* GLExtensions::getEGLExtensions() const {
+ return mEGLExtensions.string();
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLExtensions.h b/media/libstagefright/renderfright/gl/GLExtensions.h
new file mode 100644
index 0000000..ef00009
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLExtensions.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SF_GLEXTENSION_H
+#define ANDROID_SF_GLEXTENSION_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <utils/Singleton.h>
+#include <utils/String8.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLExtensions : public Singleton<GLExtensions> {
+public:
+ bool hasNoConfigContext() const { return mHasNoConfigContext; }
+ bool hasNativeFenceSync() const { return mHasNativeFenceSync; }
+ bool hasFenceSync() const { return mHasFenceSync; }
+ bool hasWaitSync() const { return mHasWaitSync; }
+ bool hasProtectedContent() const { return mHasProtectedContent; }
+ bool hasContextPriority() const { return mHasContextPriority; }
+ bool hasSurfacelessContext() const { return mHasSurfacelessContext; }
+ bool hasProtectedTexture() const { return mHasProtectedTexture; }
+
+ void initWithGLStrings(GLubyte const* vendor, GLubyte const* renderer, GLubyte const* version,
+ GLubyte const* extensions);
+ char const* getVendor() const;
+ char const* getRenderer() const;
+ char const* getVersion() const;
+ char const* getExtensions() const;
+
+ void initWithEGLStrings(char const* eglVersion, char const* eglExtensions);
+ char const* getEGLVersion() const;
+ char const* getEGLExtensions() const;
+
+protected:
+ GLExtensions() = default;
+
+private:
+ friend class Singleton<GLExtensions>;
+
+ bool mHasNoConfigContext = false;
+ bool mHasNativeFenceSync = false;
+ bool mHasFenceSync = false;
+ bool mHasWaitSync = false;
+ bool mHasProtectedContent = false;
+ bool mHasContextPriority = false;
+ bool mHasSurfacelessContext = false;
+ bool mHasProtectedTexture = false;
+
+ String8 mVendor;
+ String8 mRenderer;
+ String8 mVersion;
+ String8 mExtensions;
+ String8 mEGLVersion;
+ String8 mEGLExtensions;
+
+ GLExtensions(const GLExtensions&);
+ GLExtensions& operator=(const GLExtensions&);
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif // ANDROID_SF_GLEXTENSION_H
diff --git a/media/libstagefright/renderfright/gl/GLFramebuffer.cpp b/media/libstagefright/renderfright/gl/GLFramebuffer.cpp
new file mode 100644
index 0000000..383486b
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLFramebuffer.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLFramebuffer.h"
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+#include <gui/DebugEGLImageTracker.h>
+#include <nativebase/nativebase.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLFramebuffer::GLFramebuffer(GLESRenderEngine& engine)
+ : mEngine(engine), mEGLDisplay(engine.getEGLDisplay()), mEGLImage(EGL_NO_IMAGE_KHR) {
+ glGenTextures(1, &mTextureName);
+ glGenFramebuffers(1, &mFramebufferName);
+}
+
+GLFramebuffer::~GLFramebuffer() {
+ glDeleteFramebuffers(1, &mFramebufferName);
+ glDeleteTextures(1, &mTextureName);
+}
+
+bool GLFramebuffer::setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
+ const bool useFramebufferCache) {
+ ATRACE_CALL();
+ if (mEGLImage != EGL_NO_IMAGE_KHR) {
+ if (!usingFramebufferCache) {
+ eglDestroyImageKHR(mEGLDisplay, mEGLImage);
+ DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+ }
+ mEGLImage = EGL_NO_IMAGE_KHR;
+ mBufferWidth = 0;
+ mBufferHeight = 0;
+ }
+
+ if (nativeBuffer) {
+ mEGLImage = mEngine.createFramebufferImageIfNeeded(nativeBuffer, isProtected,
+ useFramebufferCache);
+ if (mEGLImage == EGL_NO_IMAGE_KHR) {
+ return false;
+ }
+ usingFramebufferCache = useFramebufferCache;
+ mBufferWidth = nativeBuffer->width;
+ mBufferHeight = nativeBuffer->height;
+ }
+ return true;
+}
+
+void GLFramebuffer::allocateBuffers(uint32_t width, uint32_t height, void* data) {
+ ATRACE_CALL();
+
+ glBindTexture(GL_TEXTURE_2D, mTextureName);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
+
+ mBufferHeight = height;
+ mBufferWidth = width;
+ mEngine.checkErrors("Allocating Fbo texture");
+
+ bind();
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mTextureName, 0);
+ mStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+ unbind();
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ if (mStatus != GL_FRAMEBUFFER_COMPLETE) {
+ ALOGE("Frame buffer is not complete. Error %d", mStatus);
+ }
+}
+
+void GLFramebuffer::bind() const {
+ glBindFramebuffer(GL_FRAMEBUFFER, mFramebufferName);
+}
+
+void GLFramebuffer::bindAsReadBuffer() const {
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, mFramebufferName);
+}
+
+void GLFramebuffer::bindAsDrawBuffer() const {
+ glBindFramebuffer(GL_DRAW_FRAMEBUFFER, mFramebufferName);
+}
+
+void GLFramebuffer::unbind() const {
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLFramebuffer.h b/media/libstagefright/renderfright/gl/GLFramebuffer.h
new file mode 100644
index 0000000..6757695
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLFramebuffer.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <renderengine/Framebuffer.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class GLFramebuffer : public renderengine::Framebuffer {
+public:
+ explicit GLFramebuffer(GLESRenderEngine& engine);
+ explicit GLFramebuffer(GLESRenderEngine& engine, bool multiTarget);
+ ~GLFramebuffer() override;
+
+ bool setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
+ const bool useFramebufferCache) override;
+ void allocateBuffers(uint32_t width, uint32_t height, void* data = nullptr);
+ EGLImageKHR getEGLImage() const { return mEGLImage; }
+ uint32_t getTextureName() const { return mTextureName; }
+ uint32_t getFramebufferName() const { return mFramebufferName; }
+ int32_t getBufferHeight() const { return mBufferHeight; }
+ int32_t getBufferWidth() const { return mBufferWidth; }
+ GLenum getStatus() const { return mStatus; }
+ void bind() const;
+ void bindAsReadBuffer() const;
+ void bindAsDrawBuffer() const;
+ void unbind() const;
+
+private:
+ GLESRenderEngine& mEngine;
+ EGLDisplay mEGLDisplay;
+ EGLImageKHR mEGLImage;
+ bool usingFramebufferCache = false;
+ GLenum mStatus = GL_FRAMEBUFFER_UNSUPPORTED;
+ uint32_t mTextureName, mFramebufferName;
+
+ int32_t mBufferHeight = 0;
+ int32_t mBufferWidth = 0;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLImage.cpp b/media/libstagefright/renderfright/gl/GLImage.cpp
new file mode 100644
index 0000000..8497721
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLImage.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLImage.h"
+
+#include <vector>
+
+#include <gui/DebugEGLImageTracker.h>
+#include <log/log.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "GLExtensions.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+static std::vector<EGLint> buildAttributeList(bool isProtected) {
+ std::vector<EGLint> attrs;
+ attrs.reserve(16);
+
+ attrs.push_back(EGL_IMAGE_PRESERVED_KHR);
+ attrs.push_back(EGL_TRUE);
+
+ if (isProtected && GLExtensions::getInstance().hasProtectedContent()) {
+ attrs.push_back(EGL_PROTECTED_CONTENT_EXT);
+ attrs.push_back(EGL_TRUE);
+ }
+
+ attrs.push_back(EGL_NONE);
+
+ return attrs;
+}
+
+GLImage::GLImage(const GLESRenderEngine& engine) : mEGLDisplay(engine.getEGLDisplay()) {}
+
+GLImage::~GLImage() {
+ setNativeWindowBuffer(nullptr, false);
+}
+
+bool GLImage::setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) {
+ ATRACE_CALL();
+ if (mEGLImage != EGL_NO_IMAGE_KHR) {
+ if (!eglDestroyImageKHR(mEGLDisplay, mEGLImage)) {
+ ALOGE("failed to destroy image: %#x", eglGetError());
+ }
+ DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+ mEGLImage = EGL_NO_IMAGE_KHR;
+ }
+
+ if (buffer) {
+ std::vector<EGLint> attrs = buildAttributeList(isProtected);
+ mEGLImage = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<EGLClientBuffer>(buffer), attrs.data());
+ if (mEGLImage == EGL_NO_IMAGE_KHR) {
+ ALOGE("failed to create EGLImage: %#x", eglGetError());
+ return false;
+ }
+ DEBUG_EGL_IMAGE_TRACKER_CREATE();
+ mProtected = isProtected;
+ }
+
+ return true;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLImage.h b/media/libstagefright/renderfright/gl/GLImage.h
new file mode 100644
index 0000000..59d6ce3
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLImage.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <android-base/macros.h>
+#include <renderengine/Image.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class GLImage : public renderengine::Image {
+public:
+ explicit GLImage(const GLESRenderEngine& engine);
+ ~GLImage() override;
+
+ bool setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) override;
+
+ EGLImageKHR getEGLImage() const { return mEGLImage; }
+ bool isProtected() const { return mProtected; }
+
+private:
+ EGLDisplay mEGLDisplay;
+ EGLImageKHR mEGLImage = EGL_NO_IMAGE_KHR;
+ bool mProtected = false;
+
+ DISALLOW_COPY_AND_ASSIGN(GLImage);
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowTexture.cpp b/media/libstagefright/renderfright/gl/GLShadowTexture.cpp
new file mode 100644
index 0000000..2423a34
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowTexture.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+
+#include "GLShadowTexture.h"
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLShadowTexture::GLShadowTexture() {
+ fillShadowTextureData(mTextureData, SHADOW_TEXTURE_WIDTH);
+
+ glGenTextures(1, &mName);
+ glBindTexture(GL_TEXTURE_2D, mName);
+ glTexImage2D(GL_TEXTURE_2D, 0 /* base image level */, GL_ALPHA, SHADOW_TEXTURE_WIDTH,
+ SHADOW_TEXTURE_HEIGHT, 0 /* border */, GL_ALPHA, GL_UNSIGNED_BYTE, mTextureData);
+ mTexture.init(Texture::TEXTURE_2D, mName);
+ mTexture.setFiltering(true);
+ mTexture.setDimensions(SHADOW_TEXTURE_WIDTH, 1);
+}
+
+GLShadowTexture::~GLShadowTexture() {
+ glDeleteTextures(1, &mName);
+}
+
+const Texture& GLShadowTexture::getTexture() {
+ return mTexture;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowTexture.h b/media/libstagefright/renderfright/gl/GLShadowTexture.h
new file mode 100644
index 0000000..250a9d7
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowTexture.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <renderengine/Texture.h>
+#include <cstdint>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLShadowTexture {
+public:
+ GLShadowTexture();
+ ~GLShadowTexture();
+
+ const Texture& getTexture();
+
+private:
+ static constexpr int SHADOW_TEXTURE_WIDTH = 128;
+ static constexpr int SHADOW_TEXTURE_HEIGHT = 1;
+
+ GLuint mName;
+ Texture mTexture;
+ uint8_t mTextureData[SHADOW_TEXTURE_WIDTH];
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp
new file mode 100644
index 0000000..3181f9b
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Mesh.h>
+
+#include <math/vec4.h>
+
+#include <ui/Rect.h>
+#include <ui/Transform.h>
+
+#include "GLShadowVertexGenerator.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLShadowVertexGenerator::GLShadowVertexGenerator(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& ambientColor,
+ const vec4& spotColor, const vec3& lightPosition,
+ float lightRadius) {
+ mDrawAmbientShadow = ambientColor.a > 0.f;
+ mDrawSpotShadow = spotColor.a > 0.f;
+
+ // Generate geometries and find number of vertices to generate
+ if (mDrawAmbientShadow) {
+ mAmbientShadowGeometry = getAmbientShadowGeometry(casterRect, casterCornerRadius, casterZ,
+ casterIsTranslucent, ambientColor);
+ mAmbientShadowVertexCount = getVertexCountForGeometry(*mAmbientShadowGeometry.get());
+ mAmbientShadowIndexCount = getIndexCountForGeometry(*mAmbientShadowGeometry.get());
+ } else {
+ mAmbientShadowVertexCount = 0;
+ mAmbientShadowIndexCount = 0;
+ }
+
+ if (mDrawSpotShadow) {
+ mSpotShadowGeometry =
+ getSpotShadowGeometry(casterRect, casterCornerRadius, casterZ, casterIsTranslucent,
+ spotColor, lightPosition, lightRadius);
+ mSpotShadowVertexCount = getVertexCountForGeometry(*mSpotShadowGeometry.get());
+ mSpotShadowIndexCount = getIndexCountForGeometry(*mSpotShadowGeometry.get());
+ } else {
+ mSpotShadowVertexCount = 0;
+ mSpotShadowIndexCount = 0;
+ }
+}
+
+size_t GLShadowVertexGenerator::getVertexCount() const {
+ return mAmbientShadowVertexCount + mSpotShadowVertexCount;
+}
+
+size_t GLShadowVertexGenerator::getIndexCount() const {
+ return mAmbientShadowIndexCount + mSpotShadowIndexCount;
+}
+
+void GLShadowVertexGenerator::fillVertices(Mesh::VertexArray<vec2>& position,
+ Mesh::VertexArray<vec4>& color,
+ Mesh::VertexArray<vec3>& params) const {
+ if (mDrawAmbientShadow) {
+ fillVerticesForGeometry(*mAmbientShadowGeometry.get(), mAmbientShadowVertexCount, position,
+ color, params);
+ }
+ if (mDrawSpotShadow) {
+ fillVerticesForGeometry(*mSpotShadowGeometry.get(), mSpotShadowVertexCount,
+ Mesh::VertexArray<vec2>(position, mAmbientShadowVertexCount),
+ Mesh::VertexArray<vec4>(color, mAmbientShadowVertexCount),
+ Mesh::VertexArray<vec3>(params, mAmbientShadowVertexCount));
+ }
+}
+
+void GLShadowVertexGenerator::fillIndices(uint16_t* indices) const {
+ if (mDrawAmbientShadow) {
+ fillIndicesForGeometry(*mAmbientShadowGeometry.get(), mAmbientShadowIndexCount,
+ 0 /* starting vertex offset */, indices);
+ }
+ if (mDrawSpotShadow) {
+ fillIndicesForGeometry(*mSpotShadowGeometry.get(), mSpotShadowIndexCount,
+ mAmbientShadowVertexCount /* starting vertex offset */,
+ &(indices[mAmbientShadowIndexCount]));
+ }
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h
new file mode 100644
index 0000000..112f976
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <math/vec4.h>
+#include <ui/Rect.h>
+
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+
+class Mesh;
+
+namespace gl {
+
+/**
+ * Generates the GL vertex attributes required to draw spot and/or ambient shadows.
+ *
+ * Each shadow can have a different color. This class generates three vertex attributes for
+ * each shadow: its position, color and shadow params (offset and distance). These can be sent
+ * using a single glDrawElements call.
+ */
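+// Usage sketch (mirrors GLESRenderEngine::handleShadow in this change): construct the generator,
+// size a Mesh using getVertexCount()/getIndexCount(), then call fillVertices() and fillIndices()
+// to populate the Mesh's position, color and shadow-params arrays before a single draw call.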
+class GLShadowVertexGenerator {
+public:
+ GLShadowVertexGenerator(const FloatRect& casterRect, float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& ambientColor,
+ const vec4& spotColor, const vec3& lightPosition, float lightRadius);
+ ~GLShadowVertexGenerator() = default;
+
+ size_t getVertexCount() const;
+ size_t getIndexCount() const;
+ void fillVertices(Mesh::VertexArray<vec2>& position, Mesh::VertexArray<vec4>& color,
+ Mesh::VertexArray<vec3>& params) const;
+ void fillIndices(uint16_t* indices) const;
+
+private:
+ bool mDrawAmbientShadow;
+ std::unique_ptr<Geometry> mAmbientShadowGeometry;
+ int mAmbientShadowVertexCount = 0;
+ int mAmbientShadowIndexCount = 0;
+
+ bool mDrawSpotShadow;
+ std::unique_ptr<Geometry> mSpotShadowGeometry;
+ int mSpotShadowVertexCount = 0;
+ int mSpotShadowIndexCount = 0;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp
new file mode 100644
index 0000000..da8b435
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp
@@ -0,0 +1,656 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math/vec4.h>
+
+#include <renderengine/Mesh.h>
+
+#include <ui/Rect.h>
+#include <ui/Transform.h>
+
+#include <utils/Log.h>
+
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * The shadow geometry logic and vertex generation code has been ported from Skia's fast-path
+ * shadow OpenGL implementation to draw shadows around rects and rounded rects, including
+ * circles.
+ *
+ * path: skia/src/gpu/GrRenderTargetContext.cpp GrRenderTargetContext::drawFastShadow
+ *
+ * Modifications made:
+ * - Switched to using std lib math functions
+ * - Fall-off function is implemented in the vertex shader rather than in a shadow texture
+ * - Removed transformations applied on the caster rect since the caster will be in local
+ * coordinate space and will be transformed by the vertex shader.
+ */
+
+static inline float divide_and_pin(float numer, float denom, float min, float max) {
+ if (denom == 0.0f) return min;
+ return std::clamp(numer / denom, min, max);
+}
+
+static constexpr auto SK_ScalarSqrt2 = 1.41421356f;
+static constexpr auto kAmbientHeightFactor = 1.0f / 128.0f;
+static constexpr auto kAmbientGeomFactor = 64.0f;
+// Assuming that we have a light height of 600 for the spot shadow,
+// the spot values will reach their maximum at a height of approximately 292.3077.
+// We'll round up to 300 to keep it simple.
+static constexpr auto kMaxAmbientRadius = 300 * kAmbientHeightFactor * kAmbientGeomFactor;
+
+inline float AmbientBlurRadius(float height) {
+ return std::min(height * kAmbientHeightFactor * kAmbientGeomFactor, kMaxAmbientRadius);
+}
+inline float AmbientRecipAlpha(float height) {
+ return 1.0f + std::max(height * kAmbientHeightFactor, 0.0f);
+}
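+
+// Worked numbers (illustrative): kMaxAmbientRadius = 300 * (1/128) * 64 = 150. For a caster
+// height of 600, AmbientBlurRadius(600) = min(600 * 0.5, 150) = 150 and
+// AmbientRecipAlpha(600) = 1 + 600/128 = 5.6875.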
+
+//////////////////////////////////////////////////////////////////////////////
+// Circle Data
+//
+// We have two possible cases for geometry for a circle:
+
+// In the case of a normal fill, we draw geometry for the circle as an octagon.
+static const uint16_t gFillCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 8, 1, 2, 8,
+ 2, 3, 8, 3, 4, 8,
+ 4, 5, 8, 5, 6, 8,
+ 6, 7, 8, 7, 0, 8,
+ // clang-format on
+};
+
+// For stroked circles, we use two nested octagons.
+static const uint16_t gStrokeCircleIndices[] = {
+ // enter the octagon
+ // clang-format off
+ 0, 1, 9, 0, 9, 8,
+ 1, 2, 10, 1, 10, 9,
+ 2, 3, 11, 2, 11, 10,
+ 3, 4, 12, 3, 12, 11,
+ 4, 5, 13, 4, 13, 12,
+ 5, 6, 14, 5, 14, 13,
+ 6, 7, 15, 6, 15, 14,
+ 7, 0, 8, 7, 8, 15,
+ // clang-format on
+};
+
+#define SK_ARRAY_COUNT(a) (sizeof(a) / sizeof((a)[0]))
+static const int kIndicesPerFillCircle = SK_ARRAY_COUNT(gFillCircleIndices);
+static const int kIndicesPerStrokeCircle = SK_ARRAY_COUNT(gStrokeCircleIndices);
+static const int kVertsPerStrokeCircle = 16;
+static const int kVertsPerFillCircle = 9;
+
+static int circle_type_to_vert_count(bool stroked) {
+ return stroked ? kVertsPerStrokeCircle : kVertsPerFillCircle;
+}
+
+static int circle_type_to_index_count(bool stroked) {
+ return stroked ? kIndicesPerStrokeCircle : kIndicesPerFillCircle;
+}
+
+static const uint16_t* circle_type_to_indices(bool stroked) {
+ return stroked ? gStrokeCircleIndices : gFillCircleIndices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// RoundRect Data
+//
+// The geometry for a shadow roundrect is similar to a 9-patch:
+// ____________
+// |_|________|_|
+// | | | |
+// | | | |
+// | | | |
+// |_|________|_|
+// |_|________|_|
+//
+// However, each corner is rendered as a fan rather than a simple quad, as below. (The diagram
+// shows the upper part of the upper left corner. The bottom triangle would similarly be split
+// into two triangles.)
+// ________
+// |\ \ |
+// | \ \ |
+// | \\ |
+// | \|
+// --------
+//
+// The center of the fan handles the curve of the corner. For roundrects where the stroke width
+// is greater than the corner radius, the outer triangles blend from the curve to the straight
+// sides. Otherwise these triangles will be degenerate.
+//
+// In the case where the stroke width is greater than the corner radius and the
+// blur radius (overstroke), we add additional geometry to mark out the rectangle in the center.
+// This rectangle extends the coverage values of the center edges of the 9-patch.
+// ____________
+// |_|________|_|
+// | |\ ____ /| |
+// | | | | | |
+// | | |____| | |
+// |_|/______\|_|
+// |_|________|_|
+//
+// For filled rrects we reuse the stroke geometry but add an additional quad to the center.
+
+static const uint16_t gRRectIndices[] = {
+ // clang-format off
+ // overstroke quads
+ // we place this at the beginning so that we can skip these indices when rendering as filled
+ 0, 6, 25, 0, 25, 24,
+ 6, 18, 27, 6, 27, 25,
+ 18, 12, 26, 18, 26, 27,
+ 12, 0, 24, 12, 24, 26,
+
+ // corners
+ 0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5,
+ 6, 11, 10, 6, 10, 9, 6, 9, 8, 6, 8, 7,
+ 12, 17, 16, 12, 16, 15, 12, 15, 14, 12, 14, 13,
+ 18, 19, 20, 18, 20, 21, 18, 21, 22, 18, 22, 23,
+
+ // edges
+ 0, 5, 11, 0, 11, 6,
+ 6, 7, 19, 6, 19, 18,
+ 18, 23, 17, 18, 17, 12,
+ 12, 13, 1, 12, 1, 0,
+
+ // fill quad
+ // we place this at the end so that we can skip these indices when rendering as stroked
+ 0, 6, 18, 0, 18, 12,
+ // clang-format on
+};
+
+// overstroke count
+static const int kIndicesPerOverstrokeRRect = SK_ARRAY_COUNT(gRRectIndices) - 6;
+// simple stroke count skips overstroke indices
+static const int kIndicesPerStrokeRRect = kIndicesPerOverstrokeRRect - 6 * 4;
+// fill count adds final quad to stroke count
+static const int kIndicesPerFillRRect = kIndicesPerStrokeRRect + 6;
+static const int kVertsPerStrokeRRect = 24;
+static const int kVertsPerOverstrokeRRect = 28;
+static const int kVertsPerFillRRect = 24;
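+
+// Sanity check on the counts above (illustrative): fillInRRectVerts() emits 6 vertices per
+// corner, so stroke/fill geometry uses 4 * 6 = 24 vertices; the overstroke case appends an
+// inner ring of 4 more, giving 28. gRRectIndices holds 24 overstroke + 48 corner + 24 edge +
+// 6 fill-quad entries = 102, so overstroke uses 102 - 6 = 96 indices, stroke 96 - 24 = 72,
+// and fill 72 + 6 = 78.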
+
+static int rrect_type_to_vert_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ return kVertsPerFillRRect;
+ case kStroke_RRectType:
+ return kVertsPerStrokeRRect;
+ case kOverstroke_RRectType:
+ return kVertsPerOverstrokeRRect;
+ }
+ ALOGE("Invalid rect type: %d", type);
+ return -1;
+}
+
+static int rrect_type_to_index_count(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ return kIndicesPerFillRRect;
+ case kStroke_RRectType:
+ return kIndicesPerStrokeRRect;
+ case kOverstroke_RRectType:
+ return kIndicesPerOverstrokeRRect;
+ }
+ ALOGE("Invalid rect type: %d", type);
+ return -1;
+}
+
+static const uint16_t* rrect_type_to_indices(RRectType type) {
+ switch (type) {
+ case kFill_RRectType:
+ case kStroke_RRectType:
+ return gRRectIndices + 6 * 4;
+ case kOverstroke_RRectType:
+ return gRRectIndices;
+ }
+ ALOGE("Invalid rect type: %d", type);
+ return nullptr;
+}
+
+static void fillInCircleVerts(const Geometry& args, bool isStroked,
+ Mesh::VertexArray<vec2>& position,
+ Mesh::VertexArray<vec4>& shadowColor,
+ Mesh::VertexArray<vec3>& shadowParams) {
+ vec4 color = args.fColor;
+ float outerRadius = args.fOuterRadius;
+ float innerRadius = args.fInnerRadius;
+ float blurRadius = args.fBlurRadius;
+ float distanceCorrection = outerRadius / blurRadius;
+
+ const FloatRect& bounds = args.fDevBounds;
+
+ // The inner radius in the vertex data must be specified in normalized space.
+ innerRadius = innerRadius / outerRadius;
+
+ vec2 center = vec2(bounds.getWidth() / 2.0f, bounds.getHeight() / 2.0f);
+ float halfWidth = 0.5f * bounds.getWidth();
+ float octOffset = 0.41421356237f; // sqrt(2) - 1
+ int vertexCount = 0;
+
+ position[vertexCount] = center + vec2(-octOffset * halfWidth, -halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-octOffset, -1, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(octOffset * halfWidth, -halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(octOffset, -1, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(halfWidth, -octOffset * halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(1, -octOffset, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(halfWidth, octOffset * halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(1, octOffset, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(octOffset * halfWidth, halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(octOffset, 1, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-octOffset * halfWidth, halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-octOffset, 1, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-halfWidth, octOffset * halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-1, octOffset, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-halfWidth, -octOffset * halfWidth);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-1, -octOffset, distanceCorrection);
+ vertexCount++;
+
+ if (isStroked) {
+ // compute the inner ring
+
+ // cosine and sine of pi/8
+ float c = 0.923579533f;
+ float s = 0.382683432f;
+ float r = args.fInnerRadius;
+
+ position[vertexCount] = center + vec2(-s * r, -c * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-s * innerRadius, -c * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(s * r, -c * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(s * innerRadius, -c * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(c * r, -s * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(c * innerRadius, -s * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(c * r, s * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(c * innerRadius, s * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(s * r, c * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(s * innerRadius, c * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-s * r, c * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-s * innerRadius, c * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-c * r, s * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-c * innerRadius, s * innerRadius, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = center + vec2(-c * r, -s * r);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(-c * innerRadius, -s * innerRadius, distanceCorrection);
+ vertexCount++;
+ } else {
+ // filled
+ position[vertexCount] = center;
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+ }
+}
+
+static void fillInRRectVerts(const Geometry& args, Mesh::VertexArray<vec2>& position,
+ Mesh::VertexArray<vec4>& shadowColor,
+ Mesh::VertexArray<vec3>& shadowParams) {
+ vec4 color = args.fColor;
+ float outerRadius = args.fOuterRadius;
+
+ const FloatRect& bounds = args.fDevBounds;
+
+ float umbraInset = args.fUmbraInset;
+ float minDim = 0.5f * std::min(bounds.getWidth(), bounds.getHeight());
+ if (umbraInset > minDim) {
+ umbraInset = minDim;
+ }
+
+ float xInner[4] = {bounds.left + umbraInset, bounds.right - umbraInset,
+ bounds.left + umbraInset, bounds.right - umbraInset};
+ float xMid[4] = {bounds.left + outerRadius, bounds.right - outerRadius,
+ bounds.left + outerRadius, bounds.right - outerRadius};
+ float xOuter[4] = {bounds.left, bounds.right, bounds.left, bounds.right};
+ float yInner[4] = {bounds.top + umbraInset, bounds.top + umbraInset, bounds.bottom - umbraInset,
+ bounds.bottom - umbraInset};
+ float yMid[4] = {bounds.top + outerRadius, bounds.top + outerRadius,
+ bounds.bottom - outerRadius, bounds.bottom - outerRadius};
+ float yOuter[4] = {bounds.top, bounds.top, bounds.bottom, bounds.bottom};
+
+ float blurRadius = args.fBlurRadius;
+
+ // In the case where we have to inset more for the umbra, our two triangles in the
+ // corner get skewed to a diamond rather than a square. To correct for that,
+ // we also skew the vectors we send to the shader that help define the circle.
+ // By doing so, we end up with a quarter circle in the corner rather than the
+ // elliptical curve.
+
+ // This is a bit magical, but it gives us the correct results at extrema:
+ // a) umbraInset == outerRadius produces an orthogonal vector
+ // b) outerRadius == 0 produces a diagonal vector
+ // And visually the corner looks correct.
+ vec2 outerVec = vec2(outerRadius - umbraInset, -outerRadius - umbraInset);
+ outerVec = normalize(outerVec);
+ // We want the circle edge to fall fractionally along the diagonal at
+ // (sqrt(2)*(umbraInset - outerRadius) + outerRadius)/sqrt(2)*umbraInset
+ //
+ // Setting the components of the diagonal offset to the following value will give us that.
+ float diagVal = umbraInset / (SK_ScalarSqrt2 * (outerRadius - umbraInset) - outerRadius);
+ vec2 diagVec = vec2(diagVal, diagVal);
+ float distanceCorrection = umbraInset / blurRadius;
+
+ int vertexCount = 0;
+ // build corner by corner
+ for (int i = 0; i < 4; ++i) {
+ // inner point
+ position[vertexCount] = vec2(xInner[i], yInner[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+
+ // outer points
+ position[vertexCount] = vec2(xOuter[i], yInner[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, -1, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = vec2(xOuter[i], yMid[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(outerVec.x, outerVec.y, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = vec2(xOuter[i], yOuter[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(diagVec.x, diagVec.y, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = vec2(xMid[i], yOuter[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(outerVec.x, outerVec.y, distanceCorrection);
+ vertexCount++;
+
+ position[vertexCount] = vec2(xInner[i], yOuter[i]);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, -1, distanceCorrection);
+ vertexCount++;
+ }
+
+ // Add the additional vertices for overstroked rrects.
+ // Effectively this is an additional stroked rrect, with its
+ // parameters equal to those in the center of the 9-patch. This will
+ // give constant values across this inner ring.
+ if (kOverstroke_RRectType == args.fType) {
+ float inset = umbraInset + args.fInnerRadius;
+
+ // TL
+ position[vertexCount] = vec2(bounds.left + inset, bounds.top + inset);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+
+ // TR
+ position[vertexCount] = vec2(bounds.right - inset, bounds.top + inset);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+
+ // BL
+ position[vertexCount] = vec2(bounds.left + inset, bounds.bottom - inset);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+
+ // BR
+ position[vertexCount] = vec2(bounds.right - inset, bounds.bottom - inset);
+ shadowColor[vertexCount] = color;
+ shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
+ vertexCount++;
+ }
+}
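The "bit magical" outerVec above can be verified at both extrema the comment claims: when umbraInset equals outerRadius the un-normalized vector is (0, -2 * umbraInset), i.e. axis-aligned, and when outerRadius is zero it is (-umbraInset, -umbraInset), i.e. diagonal. A standalone check (illustrative only, not part of this patch):

    // Checks the two extrema of outerVec described in fillInRRectVerts (illustrative only).
    #include <cassert>
    #include <cmath>

    static void checkOuterVec(float outerRadius, float umbraInset, float wantX, float wantY) {
        const float x = outerRadius - umbraInset;
        const float y = -outerRadius - umbraInset;
        const float len = std::hypot(x, y);
        assert(std::abs(x / len - wantX) < 1e-5f);
        assert(std::abs(y / len - wantY) < 1e-5f);
    }

    int main() {
        checkOuterVec(/*outerRadius=*/4.0f, /*umbraInset=*/4.0f, 0.0f, -1.0f);                 // orthogonal
        checkOuterVec(/*outerRadius=*/0.0f, /*umbraInset=*/4.0f, -0.70710678f, -0.70710678f);  // diagonal
        return 0;
    }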
+
+int getVertexCountForGeometry(const Geometry& shadowGeometry) {
+ if (shadowGeometry.fIsCircle) {
+ return circle_type_to_vert_count(shadowGeometry.fType);
+ }
+
+ return rrect_type_to_vert_count(shadowGeometry.fType);
+}
+
+int getIndexCountForGeometry(const Geometry& shadowGeometry) {
+ if (shadowGeometry.fIsCircle) {
+ return circle_type_to_index_count(kStroke_RRectType == shadowGeometry.fType);
+ }
+
+ return rrect_type_to_index_count(shadowGeometry.fType);
+}
+
+void fillVerticesForGeometry(const Geometry& shadowGeometry, int /* vertexCount */,
+ Mesh::VertexArray<vec2> position, Mesh::VertexArray<vec4> shadowColor,
+ Mesh::VertexArray<vec3> shadowParams) {
+ if (shadowGeometry.fIsCircle) {
+ fillInCircleVerts(shadowGeometry, shadowGeometry.fIsStroked, position, shadowColor,
+ shadowParams);
+ } else {
+ fillInRRectVerts(shadowGeometry, position, shadowColor, shadowParams);
+ }
+}
+
+void fillIndicesForGeometry(const Geometry& shadowGeometry, int indexCount,
+ int startingVertexOffset, uint16_t* indices) {
+ if (shadowGeometry.fIsCircle) {
+ const uint16_t* primIndices = circle_type_to_indices(shadowGeometry.fIsStroked);
+ for (int i = 0; i < indexCount; ++i) {
+ indices[i] = primIndices[i] + startingVertexOffset;
+ }
+ } else {
+ const uint16_t* primIndices = rrect_type_to_indices(shadowGeometry.fType);
+ for (int i = 0; i < indexCount; ++i) {
+ indices[i] = primIndices[i] + startingVertexOffset;
+ }
+ }
+}
+
+inline void GetSpotParams(float occluderZ, float lightX, float lightY, float lightZ,
+ float lightRadius, float& blurRadius, float& scale, vec2& translate) {
+ float zRatio = divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+ blurRadius = lightRadius * zRatio;
+ scale = divide_and_pin(lightZ, lightZ - occluderZ, 1.0f, 1.95f);
+ translate.x = -zRatio * lightX;
+ translate.y = -zRatio * lightY;
+}
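GetSpotParams is a similar-triangles construction: zRatio measures how far up the occluder sits between the ground plane and the light, the penumbra blur grows with the light radius, and the projected outline is scaled up and pushed away from the light. Assuming divide_and_pin (defined earlier in this file) clamps numer/denom into [min, max], a worked example:

    // Worked example of GetSpotParams; the numbers are illustrative.
    float blurRadius = 0.0f;
    float scale = 0.0f;
    vec2 translate;
    GetSpotParams(/*occluderZ=*/100.0f, /*lightX=*/0.0f, /*lightY=*/-200.0f,
                  /*lightZ=*/500.0f, /*lightRadius=*/800.0f, blurRadius, scale, translate);
    // zRatio     = clamp(100 / (500 - 100), 0.0, 0.95) = 0.25
    // blurRadius = 800 * 0.25                          = 200
    // scale      = clamp(500 / (500 - 100), 1.0, 1.95) = 1.25
    // translate  = (-0.25 * 0, -0.25 * -200)           = (0, 50)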
+
+static std::unique_ptr<Geometry> getShadowGeometry(const vec4& color, const FloatRect& devRect,
+ float devRadius, float blurRadius,
+ float insetWidth) {
+ // An insetWidth > 1/2 rect width or height indicates a simple fill.
+ const bool isCircle = ((devRadius >= devRect.getWidth()) && (devRadius >= devRect.getHeight()));
+
+ FloatRect bounds = devRect;
+ float innerRadius = 0.0f;
+ float outerRadius = devRadius;
+ float umbraInset;
+
+ RRectType type = kFill_RRectType;
+ if (isCircle) {
+ umbraInset = 0;
+ } else {
+ umbraInset = std::max(outerRadius, blurRadius);
+ }
+
+ // If stroke is greater than width or height, this is still a fill,
+ // otherwise we compute stroke params.
+ if (isCircle) {
+ innerRadius = devRadius - insetWidth;
+ type = innerRadius > 0 ? kStroke_RRectType : kFill_RRectType;
+ } else {
+ if (insetWidth <= 0.5f * std::min(devRect.getWidth(), devRect.getHeight())) {
+            // We don't worry about a real inner radius; we just need to know whether
+            // we need to create overstroke vertices.
+ innerRadius = std::max(insetWidth - umbraInset, 0.0f);
+ type = innerRadius > 0 ? kOverstroke_RRectType : kStroke_RRectType;
+ }
+ }
+ const bool isStroked = (kStroke_RRectType == type);
+ return std::make_unique<Geometry>(Geometry{color, outerRadius, umbraInset, innerRadius,
+ blurRadius, bounds, type, isCircle, isStroked});
+}
+
+std::unique_ptr<Geometry> getAmbientShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent,
+ const vec4& ambientColor) {
+ float devSpaceInsetWidth = AmbientBlurRadius(casterZ);
+ const float umbraRecipAlpha = AmbientRecipAlpha(casterZ);
+ const float devSpaceAmbientBlur = devSpaceInsetWidth * umbraRecipAlpha;
+
+ // Outset the shadow rrect to the border of the penumbra
+ float ambientPathOutset = devSpaceInsetWidth;
+ FloatRect outsetRect(casterRect);
+ outsetRect.left -= ambientPathOutset;
+ outsetRect.top -= ambientPathOutset;
+ outsetRect.right += ambientPathOutset;
+ outsetRect.bottom += ambientPathOutset;
+
+ float outsetRad = casterCornerRadius + ambientPathOutset;
+ if (casterIsTranslucent) {
+ // set a large inset to force a fill
+ devSpaceInsetWidth = outsetRect.getWidth();
+ }
+
+ return getShadowGeometry(ambientColor, outsetRect, std::abs(outsetRad), devSpaceAmbientBlur,
+ std::abs(devSpaceInsetWidth));
+}
+
+std::unique_ptr<Geometry> getSpotShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& spotColor,
+ const vec3& lightPosition, float lightRadius) {
+ float devSpaceSpotBlur;
+ float spotScale;
+ vec2 spotOffset;
+ GetSpotParams(casterZ, lightPosition.x, lightPosition.y, lightPosition.z, lightRadius,
+ devSpaceSpotBlur, spotScale, spotOffset);
+ // handle scale of radius due to CTM
+ const float srcSpaceSpotBlur = devSpaceSpotBlur;
+
+ // Adjust translate for the effect of the scale.
+ spotOffset.x += spotScale;
+ spotOffset.y += spotScale;
+
+ // Compute the transformed shadow rect
+ ui::Transform shadowTransform;
+ shadowTransform.set(spotOffset.x, spotOffset.y);
+ shadowTransform.set(spotScale, 0, 0, spotScale);
+ FloatRect spotShadowRect = shadowTransform.transform(casterRect);
+ float spotShadowRadius = casterCornerRadius * spotScale;
+
+ // Compute the insetWidth
+ float blurOutset = srcSpaceSpotBlur;
+ float insetWidth = blurOutset;
+ if (casterIsTranslucent) {
+ // If transparent, just do a fill
+ insetWidth += spotShadowRect.getWidth();
+ } else {
+ // For shadows, instead of using a stroke we specify an inset from the penumbra
+ // border. We want to extend this inset area so that it meets up with the caster
+ // geometry. The inset geometry will by default already be inset by the blur width.
+ //
+ // We compare the min and max corners inset by the radius between the original
+ // rrect and the shadow rrect. The distance between the two plus the difference
+ // between the scaled radius and the original radius gives the distance from the
+ // transformed shadow shape to the original shape in that corner. The max
+ // of these gives the maximum distance we need to cover.
+ //
+ // Since we are outsetting by 1/2 the blur distance, we just add the maxOffset to
+ // that to get the full insetWidth.
+ float maxOffset;
+ if (casterCornerRadius <= 0.f) {
+ // Manhattan distance works better for rects
+ maxOffset = std::max(std::max(std::abs(spotShadowRect.left - casterRect.left),
+ std::abs(spotShadowRect.top - casterRect.top)),
+ std::max(std::abs(spotShadowRect.right - casterRect.right),
+ std::abs(spotShadowRect.bottom - casterRect.bottom)));
+ } else {
+ float dr = spotShadowRadius - casterCornerRadius;
+ vec2 upperLeftOffset = vec2(spotShadowRect.left - casterRect.left + dr,
+ spotShadowRect.top - casterRect.top + dr);
+ vec2 lowerRightOffset = vec2(spotShadowRect.right - casterRect.right - dr,
+ spotShadowRect.bottom - casterRect.bottom - dr);
+            maxOffset = sqrt(std::max(dot(upperLeftOffset, upperLeftOffset),
+ dot(lowerRightOffset, lowerRightOffset))) +
+ dr;
+ }
+ insetWidth += std::max(blurOutset, maxOffset);
+ }
+
+ // Outset the shadow rrect to the border of the penumbra
+ spotShadowRadius += blurOutset;
+ spotShadowRect.left -= blurOutset;
+ spotShadowRect.top -= blurOutset;
+ spotShadowRect.right += blurOutset;
+ spotShadowRect.bottom += blurOutset;
+
+ return getShadowGeometry(spotColor, spotShadowRect, std::abs(spotShadowRadius),
+ 2.0f * devSpaceSpotBlur, std::abs(insetWidth));
+}
+
+void fillShadowTextureData(uint8_t* data, size_t shadowTextureWidth) {
+ for (int i = 0; i < shadowTextureWidth; i++) {
+ const float d = 1 - i / ((shadowTextureWidth * 1.0f) - 1.0f);
+ data[i] = static_cast<uint8_t>((exp(-4.0f * d * d) - 0.018f) * 255);
+ }
+}
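The falloff is exp(-4 * d^2) shifted down by 0.018 (approximately exp(-4)), so data[0], which the shader samples at the shadow's outer edge, lands at roughly zero while data[width - 1] stays near full darkness. A quick endpoint check (illustrative only, not part of this patch):

    // Evaluates the falloff at both ends of the shadow texture (illustrative only).
    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const size_t width = 128; // any width >= 2 behaves the same way
        const size_t ends[] = {0, width - 1};
        for (size_t i : ends) {
            const float d = 1 - i / ((width * 1.0f) - 1.0f);
            const auto texel = static_cast<uint8_t>((std::exp(-4.0f * d * d) - 0.018f) * 255);
            std::printf("texel[%zu] = %d\n", i, texel); // texel[0] ~= 0, texel[127] ~= 250
        }
        return 0;
    }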
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h
new file mode 100644
index 0000000..912c8bb
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <math/vec4.h>
+#include <renderengine/Mesh.h>
+#include <ui/Rect.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * The shadow geometry logic and vertex generation code have been ported from Skia's
+ * fast-path OpenGL shadow implementation to draw shadows around rects and rounded rects,
+ * including circles.
+ *
+ * path: skia/src/gpu/GrRenderTargetContext.cpp GrRenderTargetContext::drawFastShadow
+ *
+ * Modifications made:
+ * - Switched to using std lib math functions
+ * - Fall off function is implemented in vertex shader rather than a shadow texture
+ * - Removed transformations applied on the caster rect since the caster will be in local
+ * coordinate space and will be transformed by the vertex shader.
+ */
+
+enum RRectType {
+ kFill_RRectType,
+ kStroke_RRectType,
+ kOverstroke_RRectType,
+};
+
+struct Geometry {
+ vec4 fColor;
+ float fOuterRadius;
+ float fUmbraInset;
+ float fInnerRadius;
+ float fBlurRadius;
+ FloatRect fDevBounds;
+ RRectType fType;
+ bool fIsCircle;
+ bool fIsStroked;
+};
+
+std::unique_ptr<Geometry> getSpotShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& spotColor,
+ const vec3& lightPosition, float lightRadius);
+
+std::unique_ptr<Geometry> getAmbientShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent,
+ const vec4& ambientColor);
+
+int getVertexCountForGeometry(const Geometry& shadowGeometry);
+
+int getIndexCountForGeometry(const Geometry& shadowGeometry);
+
+void fillVerticesForGeometry(const Geometry& shadowGeometry, int vertexCount,
+ Mesh::VertexArray<vec2> position, Mesh::VertexArray<vec4> shadowColor,
+ Mesh::VertexArray<vec3> shadowParams);
+
+void fillIndicesForGeometry(const Geometry& shadowGeometry, int indexCount,
+ int startingVertexOffset, uint16_t* indices);
+
+/**
+ * Maps shadow geometry 'alpha' varying (1 for darkest, 0 for transparent) to
+ * darkness at that spot. Values are determined by an exponential falloff
+ * function provided by UX.
+ *
+ * The texture is used for quick lookup in the shadow shader.
+ *
+ * textureData - filled with shadow texture data that needs to be at least of
+ * size textureWidth
+ *
+ * textureWidth - width of the texture, height is always 1
+ */
+void fillShadowTextureData(uint8_t* textureData, size_t textureWidth);
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
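Taken together, a caller builds a Geometry with one of the factory functions, sizes its storage from the two count helpers, and then fills vertices and indices. A hypothetical sketch of that flow (the real caller lives in GLESRenderEngine; the vertex-array setup here is an assumption, not part of this header):

    // Hypothetical usage of the GLSkiaShadowPort API (illustrative only).
    std::unique_ptr<Geometry> geometry =
            getAmbientShadowGeometry(casterRect, /*casterCornerRadius=*/20.0f,
                                     /*casterZ=*/30.0f, /*casterIsTranslucent=*/false,
                                     ambientColor);
    const int vertexCount = getVertexCountForGeometry(*geometry);
    const int indexCount = getIndexCountForGeometry(*geometry);

    std::vector<uint16_t> indices(indexCount);
    fillIndicesForGeometry(*geometry, indexCount, /*startingVertexOffset=*/0, indices.data());

    // position, shadowColor and shadowParams are Mesh::VertexArray views over the vertex
    // storage of the Mesh that will be drawn (assumed to already exist here).
    fillVerticesForGeometry(*geometry, vertexCount, position, shadowColor, shadowParams);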
diff --git a/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp b/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp
new file mode 100644
index 0000000..e50c471
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLVertexBuffer.h"
+
+#include <GLES/gl.h>
+#include <GLES2/gl2.h>
+#include <nativebase/nativebase.h>
+#include <utils/Trace.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLVertexBuffer::GLVertexBuffer() {
+ glGenBuffers(1, &mBufferName);
+}
+
+GLVertexBuffer::~GLVertexBuffer() {
+ glDeleteBuffers(1, &mBufferName);
+}
+
+void GLVertexBuffer::allocateBuffers(const GLfloat data[], const GLuint size) {
+ ATRACE_CALL();
+ bind();
+ glBufferData(GL_ARRAY_BUFFER, size * sizeof(GLfloat), data, GL_STATIC_DRAW);
+ unbind();
+}
+
+void GLVertexBuffer::bind() const {
+ glBindBuffer(GL_ARRAY_BUFFER, mBufferName);
+}
+
+void GLVertexBuffer::unbind() const {
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLVertexBuffer.h b/media/libstagefright/renderfright/gl/GLVertexBuffer.h
new file mode 100644
index 0000000..c0fd0c1
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLVertexBuffer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class GLVertexBuffer {
+public:
+ explicit GLVertexBuffer();
+ ~GLVertexBuffer();
+
+ void allocateBuffers(const GLfloat data[], const GLuint size);
+ uint32_t getBufferName() const { return mBufferName; }
+ void bind() const;
+ void unbind() const;
+
+private:
+ uint32_t mBufferName;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
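GLVertexBuffer is a small RAII wrapper around a single GL_ARRAY_BUFFER object: the buffer name is generated in the constructor and deleted in the destructor. A hypothetical use inside a current GLES context (illustrative only; assumes the usual GLES2 headers are included):

    // Upload a quad once, then bind the buffer while setting up vertex attributes.
    const GLfloat quad[] = {0.f, 0.f,  1.f, 0.f,  1.f, 1.f,  0.f, 1.f};

    renderengine::gl::GLVertexBuffer buffer;   // glGenBuffers happens here
    buffer.allocateBuffers(quad, /*size=*/8);  // 8 floats, uploaded with GL_STATIC_DRAW

    buffer.bind();
    glVertexAttribPointer(/*index=*/0, /*size=*/2, GL_FLOAT, GL_FALSE, /*stride=*/0, nullptr);
    buffer.unbind();                           // the GL buffer is freed when 'buffer' is destroyed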
diff --git a/media/libstagefright/renderfright/gl/ImageManager.cpp b/media/libstagefright/renderfright/gl/ImageManager.cpp
new file mode 100644
index 0000000..6256649
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ImageManager.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#undef LOG_TAG
+#define LOG_TAG "RenderEngine"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <pthread.h>
+
+#include <processgroup/sched_policy.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "ImageManager.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+ImageManager::ImageManager(GLESRenderEngine* engine) : mEngine(engine) {}
+
+void ImageManager::initThread() {
+ mThread = std::thread([this]() { threadMain(); });
+ pthread_setname_np(mThread.native_handle(), "ImageManager");
+ // Use SCHED_FIFO to minimize jitter
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+    if (pthread_setschedparam(mThread.native_handle(), SCHED_FIFO, &param) != 0) {
+ ALOGE("Couldn't set SCHED_FIFO for ImageManager");
+ }
+}
+
+ImageManager::~ImageManager() {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mRunning = false;
+ }
+ mCondition.notify_all();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+void ImageManager::cacheAsync(const sp<GraphicBuffer>& buffer,
+ const std::shared_ptr<Barrier>& barrier) {
+ if (buffer == nullptr) {
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ barrier->isOpen = true;
+ barrier->result = BAD_VALUE;
+ }
+ barrier->condition.notify_one();
+ return;
+ }
+ ATRACE_CALL();
+ QueueEntry entry = {QueueEntry::Operation::Insert, buffer, buffer->getId(), barrier};
+ queueOperation(std::move(entry));
+}
+
+status_t ImageManager::cache(const sp<GraphicBuffer>& buffer) {
+ ATRACE_CALL();
+ auto barrier = std::make_shared<Barrier>();
+ cacheAsync(buffer, barrier);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ barrier->condition.wait(barrier->mutex,
+ [&]() REQUIRES(barrier->mutex) { return barrier->isOpen; });
+ return barrier->result;
+}
+
+void ImageManager::releaseAsync(uint64_t bufferId, const std::shared_ptr<Barrier>& barrier) {
+ ATRACE_CALL();
+ QueueEntry entry = {QueueEntry::Operation::Delete, nullptr, bufferId, barrier};
+ queueOperation(std::move(entry));
+}
+
+void ImageManager::queueOperation(const QueueEntry&& entry) {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mQueue.emplace(entry);
+ ATRACE_INT("ImageManagerQueueDepth", mQueue.size());
+ }
+ mCondition.notify_one();
+}
+
+void ImageManager::threadMain() {
+ set_sched_policy(0, SP_FOREGROUND);
+ bool run;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ run = mRunning;
+ }
+ while (run) {
+ QueueEntry entry;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mCondition.wait(mMutex,
+ [&]() REQUIRES(mMutex) { return !mQueue.empty() || !mRunning; });
+ run = mRunning;
+
+ if (!mRunning) {
+ // if mRunning is false, then ImageManager is being destroyed, so
+ // bail out now.
+ break;
+ }
+
+ entry = mQueue.front();
+ mQueue.pop();
+ ATRACE_INT("ImageManagerQueueDepth", mQueue.size());
+ }
+
+ status_t result = NO_ERROR;
+ switch (entry.op) {
+ case QueueEntry::Operation::Delete:
+ mEngine->unbindExternalTextureBufferInternal(entry.bufferId);
+ break;
+ case QueueEntry::Operation::Insert:
+ result = mEngine->cacheExternalTextureBufferInternal(entry.buffer);
+ break;
+ }
+ if (entry.barrier != nullptr) {
+ {
+ std::lock_guard<std::mutex> entryLock(entry.barrier->mutex);
+ entry.barrier->result = result;
+ entry.barrier->isOpen = true;
+ }
+ entry.barrier->condition.notify_one();
+ }
+ }
+
+ ALOGD("Reached end of threadMain, terminating ImageManager thread!");
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/ImageManager.h b/media/libstagefright/renderfright/gl/ImageManager.h
new file mode 100644
index 0000000..be67de8
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ImageManager.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include <ui/GraphicBuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class ImageManager {
+public:
+ struct Barrier {
+ std::mutex mutex;
+ std::condition_variable_any condition;
+ bool isOpen GUARDED_BY(mutex) = false;
+ status_t result GUARDED_BY(mutex) = NO_ERROR;
+ };
+ ImageManager(GLESRenderEngine* engine);
+ ~ImageManager();
+ // Starts the background thread for the ImageManager
+ // We need this to guarantee that the class is fully-constructed before the
+ // thread begins running.
+ void initThread();
+ void cacheAsync(const sp<GraphicBuffer>& buffer, const std::shared_ptr<Barrier>& barrier)
+ EXCLUDES(mMutex);
+ status_t cache(const sp<GraphicBuffer>& buffer);
+ void releaseAsync(uint64_t bufferId, const std::shared_ptr<Barrier>& barrier) EXCLUDES(mMutex);
+
+private:
+ struct QueueEntry {
+ enum class Operation { Delete, Insert };
+
+ Operation op = Operation::Delete;
+ sp<GraphicBuffer> buffer = nullptr;
+ uint64_t bufferId = 0;
+ std::shared_ptr<Barrier> barrier = nullptr;
+ };
+
+ void queueOperation(const QueueEntry&& entry);
+ void threadMain();
+ GLESRenderEngine* const mEngine;
+ std::thread mThread;
+ std::condition_variable_any mCondition;
+ std::mutex mMutex;
+ std::queue<QueueEntry> mQueue GUARDED_BY(mMutex);
+
+ bool mRunning GUARDED_BY(mMutex) = true;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
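The Barrier is what lets one queue serve both the blocking cache() path and the fire-and-forget async paths: the worker thread opens the barrier once the operation finishes, and the caller may wait on it whenever convenient. A hypothetical caller using the async path (engine and buffer setup assumed):

    // Hypothetical asynchronous caching (illustrative only).
    auto barrier = std::make_shared<renderengine::gl::ImageManager::Barrier>();
    imageManager.cacheAsync(graphicBuffer, barrier);

    // ... do unrelated work while the ImageManager thread creates the EGLImage ...

    std::lock_guard<std::mutex> lock(barrier->mutex);
    barrier->condition.wait(barrier->mutex, [&] { return barrier->isOpen; });
    if (barrier->result != NO_ERROR) {
        ALOGE("caching failed: %d", barrier->result);
    }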
diff --git a/media/libstagefright/renderfright/gl/Program.cpp b/media/libstagefright/renderfright/gl/Program.cpp
new file mode 100644
index 0000000..f4fbf35
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/Program.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Program.h"
+
+#include <stdint.h>
+
+#include <log/log.h>
+#include <math/mat4.h>
+#include <utils/String8.h>
+#include "ProgramCache.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+Program::Program(const ProgramCache::Key& /*needs*/, const char* vertex, const char* fragment)
+ : mInitialized(false) {
+ GLuint vertexId = buildShader(vertex, GL_VERTEX_SHADER);
+ GLuint fragmentId = buildShader(fragment, GL_FRAGMENT_SHADER);
+ GLuint programId = glCreateProgram();
+ glAttachShader(programId, vertexId);
+ glAttachShader(programId, fragmentId);
+ glBindAttribLocation(programId, position, "position");
+ glBindAttribLocation(programId, texCoords, "texCoords");
+ glBindAttribLocation(programId, cropCoords, "cropCoords");
+ glBindAttribLocation(programId, shadowColor, "shadowColor");
+ glBindAttribLocation(programId, shadowParams, "shadowParams");
+ glLinkProgram(programId);
+
+ GLint status;
+ glGetProgramiv(programId, GL_LINK_STATUS, &status);
+ if (status != GL_TRUE) {
+ ALOGE("Error while linking shaders:");
+ GLint infoLen = 0;
+ glGetProgramiv(programId, GL_INFO_LOG_LENGTH, &infoLen);
+ if (infoLen > 1) {
+ GLchar log[infoLen];
+ glGetProgramInfoLog(programId, infoLen, 0, &log[0]);
+ ALOGE("%s", log);
+ }
+ glDetachShader(programId, vertexId);
+ glDetachShader(programId, fragmentId);
+ glDeleteShader(vertexId);
+ glDeleteShader(fragmentId);
+ glDeleteProgram(programId);
+ } else {
+ mProgram = programId;
+ mVertexShader = vertexId;
+ mFragmentShader = fragmentId;
+ mInitialized = true;
+ mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
+ mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
+ mSamplerLoc = glGetUniformLocation(programId, "sampler");
+ mColorLoc = glGetUniformLocation(programId, "color");
+ mDisplayMaxLuminanceLoc = glGetUniformLocation(programId, "displayMaxLuminance");
+ mMaxMasteringLuminanceLoc = glGetUniformLocation(programId, "maxMasteringLuminance");
+ mMaxContentLuminanceLoc = glGetUniformLocation(programId, "maxContentLuminance");
+ mInputTransformMatrixLoc = glGetUniformLocation(programId, "inputTransformMatrix");
+ mOutputTransformMatrixLoc = glGetUniformLocation(programId, "outputTransformMatrix");
+ mCornerRadiusLoc = glGetUniformLocation(programId, "cornerRadius");
+ mCropCenterLoc = glGetUniformLocation(programId, "cropCenter");
+
+ // set-up the default values for our uniforms
+ glUseProgram(programId);
+ glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, mat4().asArray());
+ glEnableVertexAttribArray(0);
+ }
+}
+
+bool Program::isValid() const {
+ return mInitialized;
+}
+
+void Program::use() {
+ glUseProgram(mProgram);
+}
+
+GLuint Program::getAttrib(const char* name) const {
+ // TODO: maybe use a local cache
+ return glGetAttribLocation(mProgram, name);
+}
+
+GLint Program::getUniform(const char* name) const {
+ // TODO: maybe use a local cache
+ return glGetUniformLocation(mProgram, name);
+}
+
+GLuint Program::buildShader(const char* source, GLenum type) {
+ GLuint shader = glCreateShader(type);
+ glShaderSource(shader, 1, &source, 0);
+ glCompileShader(shader);
+ GLint status;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
+ if (status != GL_TRUE) {
+ // Some drivers return wrong values for GL_INFO_LOG_LENGTH
+ // use a fixed size instead
+ GLchar log[512];
+ glGetShaderInfoLog(shader, sizeof(log), 0, log);
+ ALOGE("Error while compiling shader: \n%s\n%s", source, log);
+ glDeleteShader(shader);
+ return 0;
+ }
+ return shader;
+}
+
+void Program::setUniforms(const Description& desc) {
+ // TODO: we should have a mechanism here to not always reset uniforms that
+ // didn't change for this program.
+
+ if (mSamplerLoc >= 0) {
+ glUniform1i(mSamplerLoc, 0);
+ glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.texture.getMatrix().asArray());
+ }
+ if (mColorLoc >= 0) {
+ const float color[4] = {desc.color.r, desc.color.g, desc.color.b, desc.color.a};
+ glUniform4fv(mColorLoc, 1, color);
+ }
+ if (mInputTransformMatrixLoc >= 0) {
+ mat4 inputTransformMatrix = desc.inputTransformMatrix;
+ glUniformMatrix4fv(mInputTransformMatrixLoc, 1, GL_FALSE, inputTransformMatrix.asArray());
+ }
+ if (mOutputTransformMatrixLoc >= 0) {
+ // The output transform matrix and color matrix can be combined as one matrix
+ // that is applied right before applying OETF.
+ mat4 outputTransformMatrix = desc.colorMatrix * desc.outputTransformMatrix;
+ glUniformMatrix4fv(mOutputTransformMatrixLoc, 1, GL_FALSE, outputTransformMatrix.asArray());
+ }
+ if (mDisplayMaxLuminanceLoc >= 0) {
+ glUniform1f(mDisplayMaxLuminanceLoc, desc.displayMaxLuminance);
+ }
+ if (mMaxMasteringLuminanceLoc >= 0) {
+ glUniform1f(mMaxMasteringLuminanceLoc, desc.maxMasteringLuminance);
+ }
+ if (mMaxContentLuminanceLoc >= 0) {
+ glUniform1f(mMaxContentLuminanceLoc, desc.maxContentLuminance);
+ }
+ if (mCornerRadiusLoc >= 0) {
+ glUniform1f(mCornerRadiusLoc, desc.cornerRadius);
+ }
+ if (mCropCenterLoc >= 0) {
+ glUniform2f(mCropCenterLoc, desc.cropSize.x / 2.0f, desc.cropSize.y / 2.0f);
+ }
+ // these uniforms are always present
+ glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.projectionMatrix.asArray());
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/Program.h b/media/libstagefright/renderfright/gl/Program.h
new file mode 100644
index 0000000..fc3755e
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/Program.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_PROGRAM_H
+#define SF_RENDER_ENGINE_PROGRAM_H
+
+#include <stdint.h>
+
+#include <GLES2/gl2.h>
+#include <renderengine/private/Description.h>
+#include "ProgramCache.h"
+
+namespace android {
+
+class String8;
+
+namespace renderengine {
+namespace gl {
+
+/*
+ * Abstracts a GLSL program comprising a vertex and fragment shader
+ */
+class Program {
+public:
+ // known locations for position and texture coordinates
+ enum {
+ /* position of each vertex for vertex shader */
+ position = 0,
+
+ /* UV coordinates for texture mapping */
+ texCoords = 1,
+
+ /* Crop coordinates, in pixels */
+ cropCoords = 2,
+
+ /* Shadow color */
+ shadowColor = 3,
+
+ /* Shadow params */
+ shadowParams = 4,
+ };
+
+ Program(const ProgramCache::Key& needs, const char* vertex, const char* fragment);
+ ~Program() = default;
+
+ /* whether this object is usable */
+ bool isValid() const;
+
+ /* Binds this program to the GLES context */
+ void use();
+
+ /* Returns the location of the specified attribute */
+ GLuint getAttrib(const char* name) const;
+
+ /* Returns the location of the specified uniform */
+ GLint getUniform(const char* name) const;
+
+ /* set-up uniforms from the description */
+ void setUniforms(const Description& desc);
+
+private:
+ GLuint buildShader(const char* source, GLenum type);
+
+ // whether the initialization succeeded
+ bool mInitialized;
+
+ // Name of the OpenGL program and shaders
+ GLuint mProgram;
+ GLuint mVertexShader;
+ GLuint mFragmentShader;
+
+ /* location of the projection matrix uniform */
+ GLint mProjectionMatrixLoc;
+
+ /* location of the texture matrix uniform */
+ GLint mTextureMatrixLoc;
+
+ /* location of the sampler uniform */
+ GLint mSamplerLoc;
+
+ /* location of the color uniform */
+ GLint mColorLoc;
+
+ /* location of display luminance uniform */
+ GLint mDisplayMaxLuminanceLoc;
+ /* location of max mastering luminance uniform */
+ GLint mMaxMasteringLuminanceLoc;
+ /* location of max content luminance uniform */
+ GLint mMaxContentLuminanceLoc;
+
+ /* location of transform matrix */
+ GLint mInputTransformMatrixLoc;
+ GLint mOutputTransformMatrixLoc;
+
+ /* location of corner radius uniform */
+ GLint mCornerRadiusLoc;
+
+ /* location of surface crop origin uniform, for rounded corner clipping */
+ GLint mCropCenterLoc;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_PROGRAM_H */
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.cpp b/media/libstagefright/renderfright/gl/ProgramCache.cpp
new file mode 100644
index 0000000..3ae35ec
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ProgramCache.cpp
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "ProgramCache.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <log/log.h>
+#include <renderengine/private/Description.h>
+#include <utils/String8.h>
+#include <utils/Trace.h>
+#include "Program.h"
+
+ANDROID_SINGLETON_STATIC_INSTANCE(android::renderengine::gl::ProgramCache)
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/*
+ * A simple formatter class to automatically add the endl and
+ * manage the indentation.
+ */
+
+class Formatter;
+static Formatter& indent(Formatter& f);
+static Formatter& dedent(Formatter& f);
+
+class Formatter {
+ String8 mString;
+ int mIndent;
+ typedef Formatter& (*FormaterManipFunc)(Formatter&);
+ friend Formatter& indent(Formatter& f);
+ friend Formatter& dedent(Formatter& f);
+
+public:
+ Formatter() : mIndent(0) {}
+
+ String8 getString() const { return mString; }
+
+ friend Formatter& operator<<(Formatter& out, const char* in) {
+ for (int i = 0; i < out.mIndent; i++) {
+ out.mString.append(" ");
+ }
+ out.mString.append(in);
+ out.mString.append("\n");
+ return out;
+ }
+ friend inline Formatter& operator<<(Formatter& out, const String8& in) {
+ return operator<<(out, in.string());
+ }
+ friend inline Formatter& operator<<(Formatter& to, FormaterManipFunc func) {
+ return (*func)(to);
+ }
+};
+Formatter& indent(Formatter& f) {
+ f.mIndent++;
+ return f;
+}
+Formatter& dedent(Formatter& f) {
+ f.mIndent--;
+ return f;
+}
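Formatter appends one line per <<, prefixed with the current indentation and followed by a newline; indent and dedent are stream manipulators that adjust the level by four spaces. A minimal sketch of how the shader generators below drive it:

    // Illustrative use of Formatter (same pattern as generateVertexShader below).
    Formatter code;
    code << "void main(void) {" << indent
         << "gl_Position = projection * position;"
         << dedent << "}";
    // code.getString() now contains:
    //   void main(void) {
    //       gl_Position = projection * position;
    //   }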
+
+void ProgramCache::primeCache(
+ EGLContext context, bool useColorManagement, bool toneMapperShaderOnly) {
+ auto& cache = mCaches[context];
+ uint32_t shaderCount = 0;
+
+ if (toneMapperShaderOnly) {
+ Key shaderKey;
+ // base settings used by HDR->SDR tonemap only
+ shaderKey.set(Key::BLEND_MASK | Key::INPUT_TRANSFORM_MATRIX_MASK |
+ Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::OUTPUT_TF_MASK |
+ Key::OPACITY_MASK | Key::ALPHA_MASK |
+ Key::ROUNDED_CORNERS_MASK | Key::TEXTURE_MASK,
+ Key::BLEND_NORMAL | Key::INPUT_TRANSFORM_MATRIX_ON |
+ Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::OUTPUT_TF_SRGB |
+ Key::OPACITY_OPAQUE | Key::ALPHA_EQ_ONE |
+ Key::ROUNDED_CORNERS_OFF | Key::TEXTURE_EXT);
+ for (int i = 0; i < 4; i++) {
+ // Cache input transfer for HLG & ST2084
+ shaderKey.set(Key::INPUT_TF_MASK, (i & 1) ?
+ Key::INPUT_TF_HLG : Key::INPUT_TF_ST2084);
+
+ // Cache Y410 input on or off
+ shaderKey.set(Key::Y410_BT2020_MASK, (i & 2) ?
+ Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+ return;
+ }
+
+ uint32_t keyMask = Key::BLEND_MASK | Key::OPACITY_MASK | Key::ALPHA_MASK | Key::TEXTURE_MASK
+ | Key::ROUNDED_CORNERS_MASK;
+ // Prime the cache for all combinations of the above masks,
+ // leaving off the experimental color matrix mask options.
+
+ nsecs_t timeBefore = systemTime();
+ for (uint32_t keyVal = 0; keyVal <= keyMask; keyVal++) {
+ Key shaderKey;
+ shaderKey.set(keyMask, keyVal);
+ uint32_t tex = shaderKey.getTextureTarget();
+ if (tex != Key::TEXTURE_OFF && tex != Key::TEXTURE_EXT && tex != Key::TEXTURE_2D) {
+ continue;
+ }
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+
+ // Prime for sRGB->P3 conversion
+ if (useColorManagement) {
+ Key shaderKey;
+ shaderKey.set(Key::BLEND_MASK | Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::INPUT_TF_MASK |
+ Key::OUTPUT_TF_MASK,
+ Key::BLEND_PREMULT | Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::INPUT_TF_SRGB |
+ Key::OUTPUT_TF_SRGB);
+ for (int i = 0; i < 16; i++) {
+ shaderKey.set(Key::OPACITY_MASK,
+ (i & 1) ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT);
+ shaderKey.set(Key::ALPHA_MASK, (i & 2) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE);
+
+ // Cache rounded corners
+ shaderKey.set(Key::ROUNDED_CORNERS_MASK,
+ (i & 4) ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF);
+
+ // Cache texture off option for window transition
+ shaderKey.set(Key::TEXTURE_MASK, (i & 8) ? Key::TEXTURE_EXT : Key::TEXTURE_OFF);
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+ }
+
+ nsecs_t timeAfter = systemTime();
+ float compileTimeMs = static_cast<float>(timeAfter - timeBefore) / 1.0E6;
+ ALOGD("shader cache generated - %u shaders in %f ms\n", shaderCount, compileTimeMs);
+}
+
+ProgramCache::Key ProgramCache::computeKey(const Description& description) {
+ Key needs;
+ needs.set(Key::TEXTURE_MASK,
+ !description.textureEnabled
+ ? Key::TEXTURE_OFF
+ : description.texture.getTextureTarget() == GL_TEXTURE_EXTERNAL_OES
+ ? Key::TEXTURE_EXT
+ : description.texture.getTextureTarget() == GL_TEXTURE_2D
+ ? Key::TEXTURE_2D
+ : Key::TEXTURE_OFF)
+ .set(Key::ALPHA_MASK, (description.color.a < 1) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE)
+ .set(Key::BLEND_MASK,
+ description.isPremultipliedAlpha ? Key::BLEND_PREMULT : Key::BLEND_NORMAL)
+ .set(Key::OPACITY_MASK,
+ description.isOpaque ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT)
+ .set(Key::Key::INPUT_TRANSFORM_MATRIX_MASK,
+ description.hasInputTransformMatrix() ? Key::INPUT_TRANSFORM_MATRIX_ON
+ : Key::INPUT_TRANSFORM_MATRIX_OFF)
+ .set(Key::Key::OUTPUT_TRANSFORM_MATRIX_MASK,
+ description.hasOutputTransformMatrix() || description.hasColorMatrix()
+ ? Key::OUTPUT_TRANSFORM_MATRIX_ON
+ : Key::OUTPUT_TRANSFORM_MATRIX_OFF)
+ .set(Key::ROUNDED_CORNERS_MASK,
+ description.cornerRadius > 0 ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF)
+ .set(Key::SHADOW_MASK, description.drawShadows ? Key::SHADOW_ON : Key::SHADOW_OFF);
+ needs.set(Key::Y410_BT2020_MASK,
+ description.isY410BT2020 ? Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
+
+ if (needs.hasTransformMatrix() ||
+ (description.inputTransferFunction != description.outputTransferFunction)) {
+ switch (description.inputTransferFunction) {
+ case Description::TransferFunction::LINEAR:
+ default:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_LINEAR);
+ break;
+ case Description::TransferFunction::SRGB:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_SRGB);
+ break;
+ case Description::TransferFunction::ST2084:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_ST2084);
+ break;
+ case Description::TransferFunction::HLG:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_HLG);
+ break;
+ }
+
+ switch (description.outputTransferFunction) {
+ case Description::TransferFunction::LINEAR:
+ default:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_LINEAR);
+ break;
+ case Description::TransferFunction::SRGB:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_SRGB);
+ break;
+ case Description::TransferFunction::ST2084:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_ST2084);
+ break;
+ case Description::TransferFunction::HLG:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_HLG);
+ break;
+ }
+ }
+
+ return needs;
+}
+
+// Generate EOTF that converts signal values to relative display light,
+// both normalized to [0, 1].
+void ProgramCache::generateEOTF(Formatter& fs, const Key& needs) {
+ switch (needs.getInputTF()) {
+ case Key::INPUT_TF_SRGB:
+ fs << R"__SHADER__(
+ float EOTF_sRGB(float srgb) {
+ return srgb <= 0.04045 ? srgb / 12.92 : pow((srgb + 0.055) / 1.055, 2.4);
+ }
+
+ vec3 EOTF_sRGB(const vec3 srgb) {
+ return vec3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
+ }
+
+ vec3 EOTF(const vec3 srgb) {
+ return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
+ }
+ )__SHADER__";
+ break;
+ case Key::INPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ vec3 EOTF(const highp vec3 color) {
+ const highp float m1 = (2610.0 / 4096.0) / 4.0;
+ const highp float m2 = (2523.0 / 4096.0) * 128.0;
+ const highp float c1 = (3424.0 / 4096.0);
+ const highp float c2 = (2413.0 / 4096.0) * 32.0;
+ const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+ highp vec3 tmp = pow(clamp(color, 0.0, 1.0), 1.0 / vec3(m2));
+ tmp = max(tmp - c1, 0.0) / (c2 - c3 * tmp);
+ return pow(tmp, 1.0 / vec3(m1));
+ }
+ )__SHADER__";
+ break;
+ case Key::INPUT_TF_HLG:
+ fs << R"__SHADER__(
+ highp float EOTF_channel(const highp float channel) {
+ const highp float a = 0.17883277;
+ const highp float b = 0.28466892;
+ const highp float c = 0.55991073;
+ return channel <= 0.5 ? channel * channel / 3.0 :
+ (exp((channel - c) / a) + b) / 12.0;
+ }
+
+ vec3 EOTF(const highp vec3 color) {
+ return vec3(EOTF_channel(color.r), EOTF_channel(color.g),
+ EOTF_channel(color.b));
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ vec3 EOTF(const vec3 linear) {
+ return linear;
+ }
+ )__SHADER__";
+ break;
+ }
+}
+
+void ProgramCache::generateToneMappingProcess(Formatter& fs, const Key& needs) {
+ // Convert relative light to absolute light.
+ switch (needs.getInputTF()) {
+ case Key::INPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ highp vec3 ScaleLuminance(highp vec3 color) {
+ return color * 10000.0;
+ }
+ )__SHADER__";
+ break;
+ case Key::INPUT_TF_HLG:
+ fs << R"__SHADER__(
+ highp vec3 ScaleLuminance(highp vec3 color) {
+ // The formula is:
+ // alpha * pow(Y, gamma - 1.0) * color + beta;
+ // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
+ return color * 1000.0 * pow(color.y, 0.2);
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ highp vec3 ScaleLuminance(highp vec3 color) {
+ return color * displayMaxLuminance;
+ }
+ )__SHADER__";
+ break;
+ }
+
+ // Tone map absolute light to display luminance range.
+ switch (needs.getInputTF()) {
+ case Key::INPUT_TF_ST2084:
+ case Key::INPUT_TF_HLG:
+ switch (needs.getOutputTF()) {
+ case Key::OUTPUT_TF_HLG:
+ // Right now when mixed PQ and HLG contents are presented,
+ // HLG content will always be converted to PQ. However, for
+ // completeness, we simply clamp the value to [0.0, 1000.0].
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ return clamp(color, 0.0, 1000.0);
+ }
+ )__SHADER__";
+ break;
+ case Key::OUTPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ return color;
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ float maxMasteringLumi = maxMasteringLuminance;
+ float maxContentLumi = maxContentLuminance;
+ float maxInLumi = min(maxMasteringLumi, maxContentLumi);
+ float maxOutLumi = displayMaxLuminance;
+
+ float nits = color.y;
+
+ // clamp to max input luminance
+ nits = clamp(nits, 0.0, maxInLumi);
+
+ // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
+ if (maxInLumi <= maxOutLumi) {
+ return color * (maxOutLumi / maxInLumi);
+ } else {
+ // three control points
+ const float x0 = 10.0;
+ const float y0 = 17.0;
+ float x1 = maxOutLumi * 0.75;
+ float y1 = x1;
+ float x2 = x1 + (maxInLumi - x1) / 2.0;
+ float y2 = y1 + (maxOutLumi - y1) * 0.75;
+
+ // horizontal distances between the last three control points
+ float h12 = x2 - x1;
+ float h23 = maxInLumi - x2;
+ // tangents at the last three control points
+ float m1 = (y2 - y1) / h12;
+ float m3 = (maxOutLumi - y2) / h23;
+ float m2 = (m1 + m3) / 2.0;
+
+ if (nits < x0) {
+ // scale [0.0, x0] to [0.0, y0] linearly
+ float slope = y0 / x0;
+ return color * slope;
+ } else if (nits < x1) {
+ // scale [x0, x1] to [y0, y1] linearly
+ float slope = (y1 - y0) / (x1 - x0);
+ nits = y0 + (nits - x0) * slope;
+ } else if (nits < x2) {
+ // scale [x1, x2] to [y1, y2] using Hermite interp
+ float t = (nits - x1) / h12;
+ nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
+ (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
+ } else {
+ // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
+ float t = (nits - x2) / h23;
+ nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
+ (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
+ }
+ }
+
+ // color.y is greater than x0 and is thus non-zero
+ return color * (nits / color.y);
+ }
+ )__SHADER__";
+ break;
+ }
+ break;
+ default:
+ // inverse tone map; the output luminance can be up to maxOutLumi.
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ const float maxOutLumi = 3000.0;
+
+ const float x0 = 5.0;
+ const float y0 = 2.5;
+ float x1 = displayMaxLuminance * 0.7;
+ float y1 = maxOutLumi * 0.15;
+ float x2 = displayMaxLuminance * 0.9;
+ float y2 = maxOutLumi * 0.45;
+ float x3 = displayMaxLuminance;
+ float y3 = maxOutLumi;
+
+ float c1 = y1 / 3.0;
+ float c2 = y2 / 2.0;
+ float c3 = y3 / 1.5;
+
+ float nits = color.y;
+
+ float scale;
+ if (nits <= x0) {
+ // scale [0.0, x0] to [0.0, y0] linearly
+ const float slope = y0 / x0;
+ return color * slope;
+ } else if (nits <= x1) {
+ // scale [x0, x1] to [y0, y1] using a curve
+ float t = (nits - x0) / (x1 - x0);
+ nits = (1.0 - t) * (1.0 - t) * y0 + 2.0 * (1.0 - t) * t * c1 + t * t * y1;
+ } else if (nits <= x2) {
+ // scale [x1, x2] to [y1, y2] using a curve
+ float t = (nits - x1) / (x2 - x1);
+ nits = (1.0 - t) * (1.0 - t) * y1 + 2.0 * (1.0 - t) * t * c2 + t * t * y2;
+ } else {
+ // scale [x2, x3] to [y2, y3] using a curve
+ float t = (nits - x2) / (x3 - x2);
+ nits = (1.0 - t) * (1.0 - t) * y2 + 2.0 * (1.0 - t) * t * c3 + t * t * y3;
+ }
+
+ // color.y is greater than x0 and is thus non-zero
+ return color * (nits / color.y);
+ }
+ )__SHADER__";
+ break;
+ }
+
+ // convert absolute light to relative light.
+ switch (needs.getOutputTF()) {
+ case Key::OUTPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ highp vec3 NormalizeLuminance(highp vec3 color) {
+ return color / 10000.0;
+ }
+ )__SHADER__";
+ break;
+ case Key::OUTPUT_TF_HLG:
+ fs << R"__SHADER__(
+ highp vec3 NormalizeLuminance(highp vec3 color) {
+ return color / 1000.0 * pow(color.y / 1000.0, -0.2 / 1.2);
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ highp vec3 NormalizeLuminance(highp vec3 color) {
+ return color / displayMaxLuminance;
+ }
+ )__SHADER__";
+ break;
+ }
+}
+
+// Generate OOTF that modifies the relative scene light to relative display light.
+void ProgramCache::generateOOTF(Formatter& fs, const ProgramCache::Key& needs) {
+ if (!needs.needsToneMapping()) {
+ fs << R"__SHADER__(
+ highp vec3 OOTF(const highp vec3 color) {
+ return color;
+ }
+ )__SHADER__";
+ } else {
+ generateToneMappingProcess(fs, needs);
+ fs << R"__SHADER__(
+ highp vec3 OOTF(const highp vec3 color) {
+ return NormalizeLuminance(ToneMap(ScaleLuminance(color)));
+ }
+ )__SHADER__";
+ }
+}
+
+// Generate OETF that converts relative display light to signal values,
+// both normalized to [0, 1]
+void ProgramCache::generateOETF(Formatter& fs, const Key& needs) {
+ switch (needs.getOutputTF()) {
+ case Key::OUTPUT_TF_SRGB:
+ fs << R"__SHADER__(
+ float OETF_sRGB(const float linear) {
+ return linear <= 0.0031308 ?
+ linear * 12.92 : (pow(linear, 1.0 / 2.4) * 1.055) - 0.055;
+ }
+
+ vec3 OETF_sRGB(const vec3 linear) {
+ return vec3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
+ }
+
+ vec3 OETF(const vec3 linear) {
+ return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
+ }
+ )__SHADER__";
+ break;
+ case Key::OUTPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ vec3 OETF(const vec3 linear) {
+ const highp float m1 = (2610.0 / 4096.0) / 4.0;
+ const highp float m2 = (2523.0 / 4096.0) * 128.0;
+ const highp float c1 = (3424.0 / 4096.0);
+ const highp float c2 = (2413.0 / 4096.0) * 32.0;
+ const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+ highp vec3 tmp = pow(linear, vec3(m1));
+ tmp = (c1 + c2 * tmp) / (1.0 + c3 * tmp);
+ return pow(tmp, vec3(m2));
+ }
+ )__SHADER__";
+ break;
+ case Key::OUTPUT_TF_HLG:
+ fs << R"__SHADER__(
+ highp float OETF_channel(const highp float channel) {
+ const highp float a = 0.17883277;
+ const highp float b = 0.28466892;
+ const highp float c = 0.55991073;
+ return channel <= 1.0 / 12.0 ? sqrt(3.0 * channel) :
+ a * log(12.0 * channel - b) + c;
+ }
+
+ vec3 OETF(const highp vec3 color) {
+ return vec3(OETF_channel(color.r), OETF_channel(color.g),
+ OETF_channel(color.b));
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ vec3 OETF(const vec3 linear) {
+ return linear;
+ }
+ )__SHADER__";
+ break;
+ }
+}
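The sRGB EOTF and OETF emitted above are inverses of each other, so a pass that needs no tone mapping is numerically a no-op up to float precision. A host-side C++ check that mirrors the GLSL (illustrative only, not part of the generated shader):

    // Round-trip check of the sRGB transfer functions generated above (illustrative only).
    #include <cassert>
    #include <cmath>

    static float eotfSrgb(float srgb) {
        return srgb <= 0.04045f ? srgb / 12.92f : std::pow((srgb + 0.055f) / 1.055f, 2.4f);
    }

    static float oetfSrgb(float linear) {
        return linear <= 0.0031308f ? linear * 12.92f
                                    : std::pow(linear, 1.0f / 2.4f) * 1.055f - 0.055f;
    }

    int main() {
        for (float v = 0.0f; v <= 1.0f; v += 0.05f) {
            assert(std::abs(oetfSrgb(eotfSrgb(v)) - v) < 1e-5f);
        }
        return 0;
    }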
+
+String8 ProgramCache::generateVertexShader(const Key& needs) {
+ Formatter vs;
+ if (needs.hasTextureCoords()) {
+ vs << "attribute vec4 texCoords;"
+ << "varying vec2 outTexCoords;";
+ }
+ if (needs.hasRoundedCorners()) {
+ vs << "attribute lowp vec4 cropCoords;";
+ vs << "varying lowp vec2 outCropCoords;";
+ }
+ if (needs.drawShadows()) {
+ vs << "attribute lowp vec4 shadowColor;";
+ vs << "varying lowp vec4 outShadowColor;";
+ vs << "attribute lowp vec4 shadowParams;";
+ vs << "varying lowp vec3 outShadowParams;";
+ }
+ vs << "attribute vec4 position;"
+ << "uniform mat4 projection;"
+ << "uniform mat4 texture;"
+ << "void main(void) {" << indent << "gl_Position = projection * position;";
+ if (needs.hasTextureCoords()) {
+ vs << "outTexCoords = (texture * texCoords).st;";
+ }
+ if (needs.hasRoundedCorners()) {
+ vs << "outCropCoords = cropCoords.st;";
+ }
+ if (needs.drawShadows()) {
+ vs << "outShadowColor = shadowColor;";
+ vs << "outShadowParams = shadowParams.xyz;";
+ }
+ vs << dedent << "}";
+ return vs.getString();
+}
+
+String8 ProgramCache::generateFragmentShader(const Key& needs) {
+ Formatter fs;
+ if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
+ fs << "#extension GL_OES_EGL_image_external : require";
+ }
+
+    // a default precision qualifier is effectively required in fragment shaders
+ fs << "precision mediump float;";
+
+ if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
+ fs << "uniform samplerExternalOES sampler;";
+ } else if (needs.getTextureTarget() == Key::TEXTURE_2D) {
+ fs << "uniform sampler2D sampler;";
+ }
+
+ if (needs.hasTextureCoords()) {
+ fs << "varying vec2 outTexCoords;";
+ }
+
+ if (needs.hasRoundedCorners()) {
+ // Rounded corners implementation using a signed distance function.
+ fs << R"__SHADER__(
+ uniform float cornerRadius;
+ uniform vec2 cropCenter;
+ varying vec2 outCropCoords;
+
+ /**
+ * This function takes the current crop coordinates and calculates an alpha value based
+ * on the corner radius and distance from the crop center.
+ */
+ float applyCornerRadius(vec2 cropCoords)
+ {
+ vec2 position = cropCoords - cropCenter;
+ // Scale down the dist vector here, as otherwise large corner
+ // radii can cause floating point issues when computing the norm
+ vec2 dist = (abs(position) - cropCenter + vec2(cornerRadius)) / 16.0;
+ // Once we've found the norm, then scale back up.
+ float plane = length(max(dist, vec2(0.0))) * 16.0;
+ return 1.0 - clamp(plane - cornerRadius, 0.0, 1.0);
+ }
+ )__SHADER__";
+ }
+
+ if (needs.drawShadows()) {
+ fs << R"__SHADER__(
+ varying lowp vec4 outShadowColor;
+ varying lowp vec3 outShadowParams;
+
+ /**
+ * Returns the shadow color.
+ */
+ vec4 getShadowColor()
+ {
+ lowp float d = length(outShadowParams.xy);
+ vec2 uv = vec2(outShadowParams.z * (1.0 - d), 0.5);
+ lowp float factor = texture2D(sampler, uv).a;
+ return outShadowColor * factor;
+ }
+ )__SHADER__";
+ }
+
+ if (needs.getTextureTarget() == Key::TEXTURE_OFF || needs.hasAlpha()) {
+ fs << "uniform vec4 color;";
+ }
+
+ if (needs.isY410BT2020()) {
+ fs << R"__SHADER__(
+ vec3 convertY410BT2020(const vec3 color) {
+ const vec3 offset = vec3(0.0625, 0.5, 0.5);
+ const mat3 transform = mat3(
+ vec3(1.1678, 1.1678, 1.1678),
+ vec3( 0.0, -0.1878, 2.1481),
+ vec3(1.6836, -0.6523, 0.0));
+ // Y is in G, U is in R, and V is in B
+ return clamp(transform * (color.grb - offset), 0.0, 1.0);
+ }
+ )__SHADER__";
+ }
+
+ if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+ if (needs.needsToneMapping()) {
+ fs << "uniform float displayMaxLuminance;";
+ fs << "uniform float maxMasteringLuminance;";
+ fs << "uniform float maxContentLuminance;";
+ }
+
+ if (needs.hasInputTransformMatrix()) {
+ fs << "uniform mat4 inputTransformMatrix;";
+ fs << R"__SHADER__(
+ highp vec3 InputTransform(const highp vec3 color) {
+ return clamp(vec3(inputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+ }
+ )__SHADER__";
+ } else {
+ fs << R"__SHADER__(
+ highp vec3 InputTransform(const highp vec3 color) {
+ return color;
+ }
+ )__SHADER__";
+ }
+
+ // the transformation from a wider colorspace to a narrower one can
+ // result in >1.0 or <0.0 pixel values
+ if (needs.hasOutputTransformMatrix()) {
+ fs << "uniform mat4 outputTransformMatrix;";
+ fs << R"__SHADER__(
+ highp vec3 OutputTransform(const highp vec3 color) {
+ return clamp(vec3(outputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+ }
+ )__SHADER__";
+ } else {
+ fs << R"__SHADER__(
+ highp vec3 OutputTransform(const highp vec3 color) {
+ return clamp(color, 0.0, 1.0);
+ }
+ )__SHADER__";
+ }
+
+ generateEOTF(fs, needs);
+ generateOOTF(fs, needs);
+ generateOETF(fs, needs);
+ }
+
+ fs << "void main(void) {" << indent;
+ if (needs.drawShadows()) {
+ fs << "gl_FragColor = getShadowColor();";
+ } else {
+ if (needs.isTexturing()) {
+ fs << "gl_FragColor = texture2D(sampler, outTexCoords);";
+ if (needs.isY410BT2020()) {
+ fs << "gl_FragColor.rgb = convertY410BT2020(gl_FragColor.rgb);";
+ }
+ } else {
+ fs << "gl_FragColor.rgb = color.rgb;";
+ fs << "gl_FragColor.a = 1.0;";
+ }
+ if (needs.isOpaque()) {
+ fs << "gl_FragColor.a = 1.0;";
+ }
+ if (needs.hasAlpha()) {
+ // modulate the current alpha value with alpha set
+ if (needs.isPremultiplied()) {
+ // ... and the color too if we're premultiplied
+ fs << "gl_FragColor *= color.a;";
+ } else {
+ fs << "gl_FragColor.a *= color.a;";
+ }
+ }
+ }
+
+ if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+ if (!needs.isOpaque() && needs.isPremultiplied()) {
+ // un-premultiply if needed before linearization
+ // avoid divide by 0 by adding 0.5/256 to the alpha channel
+ fs << "gl_FragColor.rgb = gl_FragColor.rgb / (gl_FragColor.a + 0.0019);";
+ }
+ fs << "gl_FragColor.rgb = "
+ "OETF(OutputTransform(OOTF(InputTransform(EOTF(gl_FragColor.rgb)))));";
+ if (!needs.isOpaque() && needs.isPremultiplied()) {
+ // and re-premultiply if needed after gamma correction
+ fs << "gl_FragColor.rgb = gl_FragColor.rgb * (gl_FragColor.a + 0.0019);";
+ }
+ }
+
+ if (needs.hasRoundedCorners()) {
+ if (needs.isPremultiplied()) {
+ fs << "gl_FragColor *= vec4(applyCornerRadius(outCropCoords));";
+ } else {
+ fs << "gl_FragColor.a *= applyCornerRadius(outCropCoords);";
+ }
+ }
+
+ fs << dedent << "}";
+ return fs.getString();
+}
+
+std::unique_ptr<Program> ProgramCache::generateProgram(const Key& needs) {
+ ATRACE_CALL();
+
+ // vertex shader
+ String8 vs = generateVertexShader(needs);
+
+ // fragment shader
+ String8 fs = generateFragmentShader(needs);
+
+ return std::make_unique<Program>(needs, vs.string(), fs.string());
+}
+
+void ProgramCache::useProgram(EGLContext context, const Description& description) {
+ // generate the key for the shader based on the description
+ Key needs(computeKey(description));
+
+ // look-up the program in the cache
+ auto& cache = mCaches[context];
+ auto it = cache.find(needs);
+ if (it == cache.end()) {
+ // we didn't find our program, so generate one...
+ nsecs_t time = systemTime();
+ it = cache.emplace(needs, generateProgram(needs)).first;
+ time = systemTime() - time;
+
+ ALOGV(">>> generated new program for context %p: needs=%08X, time=%u ms (%zu programs)",
+ context, needs.mKey, uint32_t(ns2ms(time)), cache.size());
+ }
+
+ // here we have a suitable program for this description
+ std::unique_ptr<Program>& program = it->second;
+ if (program->isValid()) {
+ program->use();
+ program->setUniforms(description);
+ }
+}
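+
+// Illustrative call site (comment added for clarity, not part of this change): a
+// GLES backend is expected to fetch the singleton and let it bind a program that
+// matches the current Description, e.g.
+//   ProgramCache::getInstance().useProgram(eglGetCurrentContext(), description);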
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.h b/media/libstagefright/renderfright/gl/ProgramCache.h
new file mode 100644
index 0000000..901e631
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ProgramCache.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_PROGRAMCACHE_H
+#define SF_RENDER_ENGINE_PROGRAMCACHE_H
+
+#include <memory>
+#include <unordered_map>
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <renderengine/private/Description.h>
+#include <utils/Singleton.h>
+#include <utils/TypeHelpers.h>
+
+namespace android {
+
+class String8;
+
+namespace renderengine {
+
+struct Description;
+
+namespace gl {
+
+class Formatter;
+class Program;
+
+/*
+ * This class generates GLSL programs suitable to handle a given
+ * Description. It's responsible for figuring out what to
+ * generate from a Description.
+ * It also maintains a cache of these Programs.
+ */
+class ProgramCache : public Singleton<ProgramCache> {
+public:
+ /*
+ * Key is used to retrieve a Program in the cache.
+ * A Key is generated from a Description.
+ */
+ class Key {
+ friend class ProgramCache;
+ typedef uint32_t key_t;
+ key_t mKey;
+
+ public:
+ enum {
+ BLEND_SHIFT = 0,
+ BLEND_MASK = 1 << BLEND_SHIFT,
+ BLEND_PREMULT = 1 << BLEND_SHIFT,
+ BLEND_NORMAL = 0 << BLEND_SHIFT,
+
+ OPACITY_SHIFT = 1,
+ OPACITY_MASK = 1 << OPACITY_SHIFT,
+ OPACITY_OPAQUE = 1 << OPACITY_SHIFT,
+ OPACITY_TRANSLUCENT = 0 << OPACITY_SHIFT,
+
+ ALPHA_SHIFT = 2,
+ ALPHA_MASK = 1 << ALPHA_SHIFT,
+ ALPHA_LT_ONE = 1 << ALPHA_SHIFT,
+ ALPHA_EQ_ONE = 0 << ALPHA_SHIFT,
+
+ TEXTURE_SHIFT = 3,
+ TEXTURE_MASK = 3 << TEXTURE_SHIFT,
+ TEXTURE_OFF = 0 << TEXTURE_SHIFT,
+ TEXTURE_EXT = 1 << TEXTURE_SHIFT,
+ TEXTURE_2D = 2 << TEXTURE_SHIFT,
+
+ ROUNDED_CORNERS_SHIFT = 5,
+ ROUNDED_CORNERS_MASK = 1 << ROUNDED_CORNERS_SHIFT,
+ ROUNDED_CORNERS_OFF = 0 << ROUNDED_CORNERS_SHIFT,
+ ROUNDED_CORNERS_ON = 1 << ROUNDED_CORNERS_SHIFT,
+
+ INPUT_TRANSFORM_MATRIX_SHIFT = 6,
+ INPUT_TRANSFORM_MATRIX_MASK = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+ INPUT_TRANSFORM_MATRIX_OFF = 0 << INPUT_TRANSFORM_MATRIX_SHIFT,
+ INPUT_TRANSFORM_MATRIX_ON = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+
+ OUTPUT_TRANSFORM_MATRIX_SHIFT = 7,
+ OUTPUT_TRANSFORM_MATRIX_MASK = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+ OUTPUT_TRANSFORM_MATRIX_OFF = 0 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+ OUTPUT_TRANSFORM_MATRIX_ON = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+
+ INPUT_TF_SHIFT = 8,
+ INPUT_TF_MASK = 3 << INPUT_TF_SHIFT,
+ INPUT_TF_LINEAR = 0 << INPUT_TF_SHIFT,
+ INPUT_TF_SRGB = 1 << INPUT_TF_SHIFT,
+ INPUT_TF_ST2084 = 2 << INPUT_TF_SHIFT,
+ INPUT_TF_HLG = 3 << INPUT_TF_SHIFT,
+
+ OUTPUT_TF_SHIFT = 10,
+ OUTPUT_TF_MASK = 3 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_LINEAR = 0 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_SRGB = 1 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_ST2084 = 2 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_HLG = 3 << OUTPUT_TF_SHIFT,
+
+ Y410_BT2020_SHIFT = 12,
+ Y410_BT2020_MASK = 1 << Y410_BT2020_SHIFT,
+ Y410_BT2020_OFF = 0 << Y410_BT2020_SHIFT,
+ Y410_BT2020_ON = 1 << Y410_BT2020_SHIFT,
+
+ SHADOW_SHIFT = 13,
+ SHADOW_MASK = 1 << SHADOW_SHIFT,
+ SHADOW_OFF = 0 << SHADOW_SHIFT,
+ SHADOW_ON = 1 << SHADOW_SHIFT,
+ };
+
+ inline Key() : mKey(0) {}
+ inline Key(const Key& rhs) : mKey(rhs.mKey) {}
+
+ inline Key& set(key_t mask, key_t value) {
+ mKey = (mKey & ~mask) | value;
+ return *this;
+ }
+
+ inline bool isTexturing() const { return (mKey & TEXTURE_MASK) != TEXTURE_OFF; }
+ inline bool hasTextureCoords() const { return isTexturing() && !drawShadows(); }
+ inline int getTextureTarget() const { return (mKey & TEXTURE_MASK); }
+ inline bool isPremultiplied() const { return (mKey & BLEND_MASK) == BLEND_PREMULT; }
+ inline bool isOpaque() const { return (mKey & OPACITY_MASK) == OPACITY_OPAQUE; }
+ inline bool hasAlpha() const { return (mKey & ALPHA_MASK) == ALPHA_LT_ONE; }
+ inline bool hasRoundedCorners() const {
+ return (mKey & ROUNDED_CORNERS_MASK) == ROUNDED_CORNERS_ON;
+ }
+ inline bool drawShadows() const { return (mKey & SHADOW_MASK) == SHADOW_ON; }
+ inline bool hasInputTransformMatrix() const {
+ return (mKey & INPUT_TRANSFORM_MATRIX_MASK) == INPUT_TRANSFORM_MATRIX_ON;
+ }
+ inline bool hasOutputTransformMatrix() const {
+ return (mKey & OUTPUT_TRANSFORM_MATRIX_MASK) == OUTPUT_TRANSFORM_MATRIX_ON;
+ }
+ inline bool hasTransformMatrix() const {
+ return hasInputTransformMatrix() || hasOutputTransformMatrix();
+ }
+ inline int getInputTF() const { return (mKey & INPUT_TF_MASK); }
+ inline int getOutputTF() const { return (mKey & OUTPUT_TF_MASK); }
+
+ // When HDR and non-HDR contents are mixed, or different types of HDR contents are
+ // mixed, we tone map the input content to match the output content. Currently, the
+ // following conversions are handled:
+ // * SDR -> HLG
+ // * SDR -> PQ
+ // * HLG -> PQ
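+ // For example (illustrative, not part of this change):
+ //   Key k;
+ //   k.set(INPUT_TF_MASK, INPUT_TF_HLG).set(OUTPUT_TF_MASK, OUTPUT_TF_ST2084);
+ //   k.needsToneMapping();  // true  (HLG -> PQ)
+ //   k.set(INPUT_TF_MASK, INPUT_TF_SRGB).set(OUTPUT_TF_MASK, OUTPUT_TF_LINEAR);
+ //   k.needsToneMapping();  // false (SDR -> SDR)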
+ inline bool needsToneMapping() const {
+ int inputTF = getInputTF();
+ int outputTF = getOutputTF();
+
+ // Return false when converting from SDR to SDR.
+ if (inputTF == Key::INPUT_TF_SRGB && outputTF == Key::OUTPUT_TF_LINEAR) {
+ return false;
+ }
+ if (inputTF == Key::INPUT_TF_LINEAR && outputTF == Key::OUTPUT_TF_SRGB) {
+ return false;
+ }
+
+ inputTF >>= Key::INPUT_TF_SHIFT;
+ outputTF >>= Key::OUTPUT_TF_SHIFT;
+ return inputTF != outputTF;
+ }
+ inline bool isY410BT2020() const { return (mKey & Y410_BT2020_MASK) == Y410_BT2020_ON; }
+
+ // for use by std::unordered_map
+
+ bool operator==(const Key& other) const { return mKey == other.mKey; }
+
+ struct Hash {
+ size_t operator()(const Key& key) const { return static_cast<size_t>(key.mKey); }
+ };
+ };
+
+ ProgramCache() = default;
+ ~ProgramCache() = default;
+
+ // Generate shaders to populate the cache
+ void primeCache(const EGLContext context, bool useColorManagement, bool toneMapperShaderOnly);
+
+ size_t getSize(const EGLContext context) { return mCaches[context].size(); }
+
+ // useProgram looks up a suitable program in the cache, or generates one
+ // if none can be found.
+ void useProgram(const EGLContext context, const Description& description);
+
+private:
+ // compute a cache Key from a Description
+ static Key computeKey(const Description& description);
+ // Generate the EOTF based on the Key.
+ static void generateEOTF(Formatter& fs, const Key& needs);
+ // Generate necessary tone mapping methods for OOTF.
+ static void generateToneMappingProcess(Formatter& fs, const Key& needs);
+ // Generate the OOTF based on the Key.
+ static void generateOOTF(Formatter& fs, const Key& needs);
+ // Generate the OETF based on the Key.
+ static void generateOETF(Formatter& fs, const Key& needs);
+ // generates a program from the Key
+ static std::unique_ptr<Program> generateProgram(const Key& needs);
+ // generates the vertex shader from the Key
+ static String8 generateVertexShader(const Key& needs);
+ // generates the fragment shader from the Key
+ static String8 generateFragmentShader(const Key& needs);
+
+ // Key/Value map used for caching Programs. Currently the cache
+ // is never shrunk (and the GL program objects are never deleted).
+ std::unordered_map<EGLContext, std::unordered_map<Key, std::unique_ptr<Program>, Key::Hash>>
+ mCaches;
+};
+
+} // namespace gl
+} // namespace renderengine
+
+ANDROID_BASIC_TYPES_TRAITS(renderengine::gl::ProgramCache::Key)
+
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_PROGRAMCACHE_H */
diff --git a/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp b/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp
new file mode 100644
index 0000000..19f18c0
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "BlurFilter.h"
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES3/gl3.h>
+#include <GLES3/gl3ext.h>
+#include <ui/GraphicTypes.h>
+#include <cstdint>
+
+#include <utils/Trace.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+BlurFilter::BlurFilter(GLESRenderEngine& engine)
+ : mEngine(engine),
+ mCompositionFbo(engine),
+ mPingFbo(engine),
+ mPongFbo(engine),
+ mMixProgram(engine),
+ mBlurProgram(engine) {
+ mMixProgram.compile(getVertexShader(), getMixFragShader());
+ mMPosLoc = mMixProgram.getAttributeLocation("aPosition");
+ mMUvLoc = mMixProgram.getAttributeLocation("aUV");
+ mMTextureLoc = mMixProgram.getUniformLocation("uTexture");
+ mMCompositionTextureLoc = mMixProgram.getUniformLocation("uCompositionTexture");
+ mMMixLoc = mMixProgram.getUniformLocation("uMix");
+
+ mBlurProgram.compile(getVertexShader(), getFragmentShader());
+ mBPosLoc = mBlurProgram.getAttributeLocation("aPosition");
+ mBUvLoc = mBlurProgram.getAttributeLocation("aUV");
+ mBTextureLoc = mBlurProgram.getUniformLocation("uTexture");
+ mBOffsetLoc = mBlurProgram.getUniformLocation("uOffset");
+
+ static constexpr auto size = 2.0f;
+ static constexpr auto translation = 1.0f;
+ const GLfloat vboData[] = {
+ // Vertex data
+ translation - size, -translation - size,
+ translation - size, -translation + size,
+ translation + size, -translation + size,
+ // UV data
+ 0.0f, 0.0f - translation,
+ 0.0f, size - translation,
+ size, size - translation
+ };
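+ // Note (comment added for clarity): the three vertices above form a single
+ // oversized triangle, (-1,-3), (-1,1), (3,1) in NDC, which covers the whole
+ // viewport without needing a second triangle for a quad; the UV data is laid
+ // out to match.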
+ mMeshBuffer.allocateBuffers(vboData, 12 /* size */);
+}
+
+status_t BlurFilter::setAsDrawTarget(const DisplaySettings& display, uint32_t radius) {
+ ATRACE_NAME("BlurFilter::setAsDrawTarget");
+ mRadius = radius;
+ mDisplayX = display.physicalDisplay.left;
+ mDisplayY = display.physicalDisplay.top;
+
+ if (mDisplayWidth < display.physicalDisplay.width() ||
+ mDisplayHeight < display.physicalDisplay.height()) {
+ ATRACE_NAME("BlurFilter::allocatingTextures");
+
+ mDisplayWidth = display.physicalDisplay.width();
+ mDisplayHeight = display.physicalDisplay.height();
+ mCompositionFbo.allocateBuffers(mDisplayWidth, mDisplayHeight);
+
+ const uint32_t fboWidth = floorf(mDisplayWidth * kFboScale);
+ const uint32_t fboHeight = floorf(mDisplayHeight * kFboScale);
+ mPingFbo.allocateBuffers(fboWidth, fboHeight);
+ mPongFbo.allocateBuffers(fboWidth, fboHeight);
+
+ if (mPingFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+ ALOGE("Invalid ping buffer");
+ return mPingFbo.getStatus();
+ }
+ if (mPongFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+ ALOGE("Invalid pong buffer");
+ return mPongFbo.getStatus();
+ }
+ if (mCompositionFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+ ALOGE("Invalid composition buffer");
+ return mCompositionFbo.getStatus();
+ }
+ if (!mBlurProgram.isValid()) {
+ ALOGE("Invalid shader");
+ return GL_INVALID_OPERATION;
+ }
+ }
+
+ mCompositionFbo.bind();
+ glViewport(0, 0, mCompositionFbo.getBufferWidth(), mCompositionFbo.getBufferHeight());
+ return NO_ERROR;
+}
+
+void BlurFilter::drawMesh(GLuint uv, GLuint position) {
+ glEnableVertexAttribArray(uv);
+ glEnableVertexAttribArray(position);
+ mMeshBuffer.bind();
+ glVertexAttribPointer(position, 2 /* size */, GL_FLOAT, GL_FALSE,
+ 2 * sizeof(GLfloat) /* stride */, 0 /* offset */);
+ glVertexAttribPointer(uv, 2 /* size */, GL_FLOAT, GL_FALSE, 0 /* stride */,
+ (GLvoid*)(6 * sizeof(GLfloat)) /* offset */);
+ mMeshBuffer.unbind();
+
+ // draw mesh
+ glDrawArrays(GL_TRIANGLES, 0 /* first */, 3 /* count */);
+}
+
+status_t BlurFilter::prepare() {
+ ATRACE_NAME("BlurFilter::prepare");
+
+ // Kawase is an approximation of a Gaussian, but it behaves differently from one.
+ // A radius transformation is required to approximate the requested Gaussian radius,
+ // and also to introduce non-integer steps, which are necessary to smoothly interpolate large radii.
+ const auto radius = mRadius / 6.0f;
+
+ // Calculate how many passes we'll do, based on the radius.
+ // Too many passes will make the operation expensive.
+ const auto passes = min(kMaxPasses, (uint32_t)ceil(radius));
+
+ const float radiusByPasses = radius / (float)passes;
+ const float stepX = radiusByPasses / (float)mCompositionFbo.getBufferWidth();
+ const float stepY = radiusByPasses / (float)mCompositionFbo.getBufferHeight();
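+ // Worked example (comment added for clarity): with mRadius = 30 the transformed
+ // radius is 30 / 6 = 5, so passes = min(4, ceil(5)) = 4 and radiusByPasses = 1.25;
+ // the sampling offset then grows by 1.25 / bufferWidth (and height) in UV space on
+ // each successive pass below.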
+
+ // Let's start by downsampling and blurring the composited frame simultaneously.
+ mBlurProgram.useProgram();
+ glActiveTexture(GL_TEXTURE0);
+ glUniform1i(mBTextureLoc, 0);
+ glBindTexture(GL_TEXTURE_2D, mCompositionFbo.getTextureName());
+ glUniform2f(mBOffsetLoc, stepX, stepY);
+ glViewport(0, 0, mPingFbo.getBufferWidth(), mPingFbo.getBufferHeight());
+ mPingFbo.bind();
+ drawMesh(mBUvLoc, mBPosLoc);
+
+ // And now we'll ping pong between our textures, to accumulate the result of various offsets.
+ GLFramebuffer* read = &mPingFbo;
+ GLFramebuffer* draw = &mPongFbo;
+ glViewport(0, 0, draw->getBufferWidth(), draw->getBufferHeight());
+ for (auto i = 1; i < passes; i++) {
+ ATRACE_NAME("BlurFilter::renderPass");
+ draw->bind();
+
+ glBindTexture(GL_TEXTURE_2D, read->getTextureName());
+ glUniform2f(mBOffsetLoc, stepX * i, stepY * i);
+
+ drawMesh(mBUvLoc, mBPosLoc);
+
+ // Swap buffers for next iteration
+ auto tmp = draw;
+ draw = read;
+ read = tmp;
+ }
+ mLastDrawTarget = read;
+
+ return NO_ERROR;
+}
+
+status_t BlurFilter::render(bool multiPass) {
+ ATRACE_NAME("BlurFilter::render");
+
+ // Now let's scale our blur up. It will be interpolated with the larger composited
+ // texture for the first frames, to hide downscaling artifacts.
+ GLfloat mix = fmin(1.0, mRadius / kMaxCrossFadeRadius);
+
+ // When doing multiple passes, we cannot try to read mCompositionFbo, given that we'll
+ // be writing onto it. Let's disable the crossfade, otherwise we'd need 1 extra frame buffer,
+ // as large as the screen size.
+ if (mix >= 1 || multiPass) {
+ mLastDrawTarget->bindAsReadBuffer();
+ glBlitFramebuffer(0, 0, mLastDrawTarget->getBufferWidth(),
+ mLastDrawTarget->getBufferHeight(), mDisplayX, mDisplayY, mDisplayWidth,
+ mDisplayHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
+ return NO_ERROR;
+ }
+
+ mMixProgram.useProgram();
+ glUniform1f(mMMixLoc, mix);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, mLastDrawTarget->getTextureName());
+ glUniform1i(mMTextureLoc, 0);
+ glActiveTexture(GL_TEXTURE1);
+ glBindTexture(GL_TEXTURE_2D, mCompositionFbo.getTextureName());
+ glUniform1i(mMCompositionTextureLoc, 1);
+
+ drawMesh(mMUvLoc, mMPosLoc);
+
+ glUseProgram(0);
+ glActiveTexture(GL_TEXTURE0);
+ mEngine.checkErrors("Drawing blur mesh");
+ return NO_ERROR;
+}
+
+string BlurFilter::getVertexShader() const {
+ return R"SHADER(#version 310 es
+ precision mediump float;
+
+ in vec2 aPosition;
+ in highp vec2 aUV;
+ out highp vec2 vUV;
+
+ void main() {
+ vUV = aUV;
+ gl_Position = vec4(aPosition, 0.0, 1.0);
+ }
+ )SHADER";
+}
+
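+// Each blur pass below averages the center texel with four diagonally offset taps
+// (a Kawase-style kernel); the 0.2 factor is simply 1/5 of the summed samples.
+// (Comment added for clarity.)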
+string BlurFilter::getFragmentShader() const {
+ return R"SHADER(#version 310 es
+ precision mediump float;
+
+ uniform sampler2D uTexture;
+ uniform vec2 uOffset;
+
+ in highp vec2 vUV;
+ out vec4 fragColor;
+
+ void main() {
+ fragColor = texture(uTexture, vUV, 0.0);
+ fragColor += texture(uTexture, vUV + vec2( uOffset.x, uOffset.y), 0.0);
+ fragColor += texture(uTexture, vUV + vec2( uOffset.x, -uOffset.y), 0.0);
+ fragColor += texture(uTexture, vUV + vec2(-uOffset.x, uOffset.y), 0.0);
+ fragColor += texture(uTexture, vUV + vec2(-uOffset.x, -uOffset.y), 0.0);
+
+ fragColor = vec4(fragColor.rgb * 0.2, 1.0);
+ }
+ )SHADER";
+}
+
+string BlurFilter::getMixFragShader() const {
+ string shader = R"SHADER(#version 310 es
+ precision mediump float;
+
+ in highp vec2 vUV;
+ out vec4 fragColor;
+
+ uniform sampler2D uCompositionTexture;
+ uniform sampler2D uTexture;
+ uniform float uMix;
+
+ void main() {
+ vec4 blurred = texture(uTexture, vUV);
+ vec4 composition = texture(uCompositionTexture, vUV);
+ fragColor = mix(composition, blurred, uMix);
+ }
+ )SHADER";
+ return shader;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/BlurFilter.h b/media/libstagefright/renderfright/gl/filters/BlurFilter.h
new file mode 100644
index 0000000..593a8fd
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/BlurFilter.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ui/GraphicTypes.h>
+#include "../GLESRenderEngine.h"
+#include "../GLFramebuffer.h"
+#include "../GLVertexBuffer.h"
+#include "GenericProgram.h"
+
+using namespace std;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * This is an implementation of a Kawase blur, as described in here:
+ * https://community.arm.com/cfs-file/__key/communityserver-blogs-components-weblogfiles/
+ * 00-00-00-20-66/siggraph2015_2D00_mmg_2D00_marius_2D00_notes.pdf
+ */
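+// Typical call order (illustrative note, not part of this change): composition is
+// first redirected into the filter's FBO, the blur passes are run, and the result
+// is then drawn back into the currently bound output framebuffer:
+//   BlurFilter filter(engine);
+//   filter.setAsDrawTarget(display, 30 /* radius */);
+//   ... render the layers to be blurred ...
+//   filter.prepare();
+//   ... bind the output framebuffer ...
+//   filter.render(false /* multiPass */);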
+class BlurFilter {
+public:
+ // Downsample FBO to improve performance
+ static constexpr float kFboScale = 0.25f;
+ // Maximum number of render passes
+ static constexpr uint32_t kMaxPasses = 4;
+ // To avoid downscaling artifacts, we interpolate the blurred fbo with the full composited
+ // image, up to this radius.
+ static constexpr float kMaxCrossFadeRadius = 30.0f;
+
+ explicit BlurFilter(GLESRenderEngine& engine);
+ virtual ~BlurFilter(){};
+
+ // Set up render targets, redirecting output to offscreen texture.
+ status_t setAsDrawTarget(const DisplaySettings&, uint32_t radius);
+ // Execute blur passes, rendering to offscreen texture.
+ status_t prepare();
+ // Render blur to the bound framebuffer (screen).
+ status_t render(bool multiPass);
+
+private:
+ uint32_t mRadius;
+ void drawMesh(GLuint uv, GLuint position);
+ string getVertexShader() const;
+ string getFragmentShader() const;
+ string getMixFragShader() const;
+
+ GLESRenderEngine& mEngine;
+ // Frame buffer holding the composited background.
+ GLFramebuffer mCompositionFbo;
+ // Frame buffers holding the blur passes.
+ GLFramebuffer mPingFbo;
+ GLFramebuffer mPongFbo;
+ uint32_t mDisplayWidth = 0;
+ uint32_t mDisplayHeight = 0;
+ uint32_t mDisplayX = 0;
+ uint32_t mDisplayY = 0;
+ // Buffer holding the final blur pass.
+ GLFramebuffer* mLastDrawTarget;
+
+ // VBO containing vertex and uv data of a fullscreen triangle.
+ GLVertexBuffer mMeshBuffer;
+
+ GenericProgram mMixProgram;
+ GLuint mMPosLoc;
+ GLuint mMUvLoc;
+ GLuint mMMixLoc;
+ GLuint mMTextureLoc;
+ GLuint mMCompositionTextureLoc;
+
+ GenericProgram mBlurProgram;
+ GLuint mBPosLoc;
+ GLuint mBUvLoc;
+ GLuint mBTextureLoc;
+ GLuint mBOffsetLoc;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp b/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp
new file mode 100644
index 0000000..bb35889
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenericProgram.h"
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GenericProgram::GenericProgram(GLESRenderEngine& engine) : mEngine(engine) {}
+
+GenericProgram::~GenericProgram() {
+ if (mVertexShaderHandle != 0) {
+ if (mProgramHandle != 0) {
+ glDetachShader(mProgramHandle, mVertexShaderHandle);
+ }
+ glDeleteShader(mVertexShaderHandle);
+ }
+
+ if (mFragmentShaderHandle != 0) {
+ if (mProgramHandle != 0) {
+ glDetachShader(mProgramHandle, mFragmentShaderHandle);
+ }
+ glDeleteShader(mFragmentShaderHandle);
+ }
+
+ if (mProgramHandle != 0) {
+ glDeleteProgram(mProgramHandle);
+ }
+}
+
+void GenericProgram::compile(string vertexShader, string fragmentShader) {
+ mVertexShaderHandle = compileShader(GL_VERTEX_SHADER, vertexShader);
+ mFragmentShaderHandle = compileShader(GL_FRAGMENT_SHADER, fragmentShader);
+ if (mVertexShaderHandle == 0 || mFragmentShaderHandle == 0) {
+ ALOGE("Aborting program creation.");
+ return;
+ }
+ mProgramHandle = createAndLink(mVertexShaderHandle, mFragmentShaderHandle);
+ mEngine.checkErrors("Linking program");
+}
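+
+// Illustrative usage (not part of this change): callers compile once, then bind the
+// program before setting uniforms, e.g.
+//   GenericProgram blur(engine);
+//   blur.compile(vertexSrc, fragmentSrc);
+//   if (blur.isValid()) {
+//       blur.useProgram();
+//       glUniform1i(blur.getUniformLocation("uTexture"), 0);
+//   }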
+
+void GenericProgram::useProgram() const {
+ glUseProgram(mProgramHandle);
+}
+
+GLuint GenericProgram::compileShader(GLuint type, string src) const {
+ const GLuint shader = glCreateShader(type);
+ if (shader == 0) {
+ mEngine.checkErrors("Creating shader");
+ return 0;
+ }
+ const GLchar* charSrc = (const GLchar*)src.c_str();
+ glShaderSource(shader, 1, &charSrc, nullptr);
+ glCompileShader(shader);
+
+ GLint isCompiled = 0;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &isCompiled);
+ if (isCompiled == GL_FALSE) {
+ GLint maxLength = 0;
+ glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength);
+ string errorLog;
+ errorLog.resize(maxLength); // resize (not reserve) so data() points at writable storage of the right size
+ glGetShaderInfoLog(shader, maxLength, &maxLength, errorLog.data());
+ glDeleteShader(shader);
+ ALOGE("Error compiling shader: %s", errorLog.c_str());
+ return 0;
+ }
+ return shader;
+}
+
+GLuint GenericProgram::createAndLink(GLuint vertexShader, GLuint fragmentShader) const {
+ const GLuint program = glCreateProgram();
+ mEngine.checkErrors("Creating program");
+
+ glAttachShader(program, vertexShader);
+ glAttachShader(program, fragmentShader);
+ glLinkProgram(program);
+ mEngine.checkErrors("Linking program");
+ return program;
+}
+
+GLuint GenericProgram::getUniformLocation(const string name) const {
+ if (mProgramHandle == 0) {
+ ALOGE("Can't get location of %s on an invalid program.", name.c_str());
+ return -1;
+ }
+ return glGetUniformLocation(mProgramHandle, (const GLchar*)name.c_str());
+}
+
+GLuint GenericProgram::getAttributeLocation(const string name) const {
+ if (mProgramHandle == 0) {
+ ALOGE("Can't get location of %s on an invalid program.", name.c_str());
+ return -1;
+ }
+ return glGetAttribLocation(mProgramHandle, (const GLchar*)name.c_str());
+}
+
+bool GenericProgram::isValid() const {
+ return mProgramHandle != 0;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/GenericProgram.h b/media/libstagefright/renderfright/gl/filters/GenericProgram.h
new file mode 100644
index 0000000..6da2a5a
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/GenericProgram.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ui/GraphicTypes.h>
+#include "../GLESRenderEngine.h"
+#include "../GLFramebuffer.h"
+
+using namespace std;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GenericProgram {
+public:
+ explicit GenericProgram(GLESRenderEngine& renderEngine);
+ ~GenericProgram();
+ void compile(string vertexShader, string fragmentShader);
+ bool isValid() const;
+ void useProgram() const;
+ GLuint getAttributeLocation(const string name) const;
+ GLuint getUniformLocation(const string name) const;
+
+private:
+ GLuint compileShader(GLuint type, const string src) const;
+ GLuint createAndLink(GLuint vertexShader, GLuint fragmentShader) const;
+
+ GLESRenderEngine& mEngine;
+ GLuint mVertexShaderHandle = 0;
+ GLuint mFragmentShaderHandle = 0;
+ GLuint mProgramHandle = 0;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h b/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h
new file mode 100644
index 0000000..ca16d2c
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iosfwd>
+
+#include <math/mat4.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <ui/Transform.h>
+
+namespace android {
+namespace renderengine {
+
+// DisplaySettings contains the settings that are applicable when drawing all
+// layers for a given display.
+struct DisplaySettings {
+ // Rectangle describing the physical display. We will project from the
+ // logical clip onto this rectangle.
+ Rect physicalDisplay = Rect::INVALID_RECT;
+
+ // Rectangle bounded by the x,y- clipping planes in the logical display, so
+ // that the orthographic projection matrix can be computed. When
+ // constructing this matrix, the z-coordinate bounds are assumed to be at z=0 and
+ // z=1.
+ Rect clip = Rect::INVALID_RECT;
+
+ // Maximum luminance pulled from the display's HDR capabilities.
+ float maxLuminance = 1.0f;
+
+ // Output dataspace that will be populated if wide color gamut is used, or
+ // DataSpace::UNKNOWN otherwise.
+ ui::Dataspace outputDataspace = ui::Dataspace::UNKNOWN;
+
+ // Additional color transform to apply in linear space after transforming
+ // to the output dataspace.
+ mat4 colorTransform = mat4();
+
+ // Region that will be cleared to (0, 0, 0, 1) prior to rendering.
+ // This is specified in layer-stack space.
+ Region clearRegion = Region::INVALID_REGION;
+
+ // An additional orientation flag to be applied after clipping the output.
+ // By way of example, this may be used for supporting fullscreen screenshot
+ // capture of a device in landscape while the buffer is in portrait
+ // orientation.
+ uint32_t orientation = ui::Transform::ROT_0;
+};
+
+static inline bool operator==(const DisplaySettings& lhs, const DisplaySettings& rhs) {
+ return lhs.physicalDisplay == rhs.physicalDisplay && lhs.clip == rhs.clip &&
+ lhs.maxLuminance == rhs.maxLuminance && lhs.outputDataspace == rhs.outputDataspace &&
+ lhs.colorTransform == rhs.colorTransform &&
+ lhs.clearRegion.hasSameRects(rhs.clearRegion) && lhs.orientation == rhs.orientation;
+}
+
+// Defining PrintTo helps with Google Tests.
+static inline void PrintTo(const DisplaySettings& settings, ::std::ostream* os) {
+ *os << "DisplaySettings {";
+ *os << "\n .physicalDisplay = ";
+ PrintTo(settings.physicalDisplay, os);
+ *os << "\n .clip = ";
+ PrintTo(settings.clip, os);
+ *os << "\n .maxLuminance = " << settings.maxLuminance;
+ *os << "\n .outputDataspace = ";
+ PrintTo(settings.outputDataspace, os);
+ *os << "\n .colorTransform = " << settings.colorTransform;
+ *os << "\n .clearRegion = ";
+ PrintTo(settings.clearRegion, os);
+ *os << "\n .orientation = " << settings.orientation;
+ *os << "\n}";
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Framebuffer.h b/media/libstagefright/renderfright/include/renderengine/Framebuffer.h
new file mode 100644
index 0000000..6511127
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Framebuffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+
+class Framebuffer {
+public:
+ virtual ~Framebuffer() = default;
+
+ virtual bool setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
+ const bool useFramebufferCache) = 0;
+};
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Image.h b/media/libstagefright/renderfright/include/renderengine/Image.h
new file mode 100644
index 0000000..3bb4731
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Image.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+
+class Image {
+public:
+ virtual ~Image() = default;
+ virtual bool setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) = 0;
+};
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/LayerSettings.h b/media/libstagefright/renderfright/include/renderengine/LayerSettings.h
new file mode 100644
index 0000000..95e9367
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/LayerSettings.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iosfwd>
+
+#include <math/mat4.h>
+#include <math/vec3.h>
+#include <renderengine/Texture.h>
+#include <ui/Fence.h>
+#include <ui/FloatRect.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <ui/Transform.h>
+
+namespace android {
+namespace renderengine {
+
+// Metadata describing the input buffer to render from.
+struct Buffer {
+ // Buffer containing the image that we will render.
+ // If buffer == nullptr, then the rest of the fields in this struct will be
+ // ignored.
+ sp<GraphicBuffer> buffer = nullptr;
+
+ // Fence that will fire when the buffer is ready to be bound.
+ sp<Fence> fence = nullptr;
+
+ // Texture identifier to bind the external texture to.
+ // TODO(alecmouri): This is GL-specific...make the type backend-agnostic.
+ uint32_t textureName = 0;
+
+ // Whether to use filtering when rendering the texture.
+ bool useTextureFiltering = false;
+
+ // Transform matrix to apply to texture coordinates.
+ mat4 textureTransform = mat4();
+
+ // Whether to use pre-multiplied alpha.
+ bool usePremultipliedAlpha = true;
+
+ // Override flag that alpha for each pixel in the buffer *must* be 1.0.
+ // LayerSettings::alpha is still used if isOpaque==true - this flag only
+ // overrides the alpha channel of the buffer.
+ bool isOpaque = false;
+
+ // HDR color-space setting for Y410.
+ bool isY410BT2020 = false;
+ float maxMasteringLuminance = 0.0;
+ float maxContentLuminance = 0.0;
+};
+
+// Metadata describing the layer geometry.
+struct Geometry {
+ // Boundaries of the layer.
+ FloatRect boundaries = FloatRect();
+
+ // Transform matrix to apply to mesh coordinates.
+ mat4 positionTransform = mat4();
+
+ // Radius of rounded corners, if greater than 0. Otherwise, this layer's
+ // corners are not rounded.
+ // Having a corner radius will force GPU composition on the layer and its children, drawing it
+ // with a special shader. The shader will receive the radius and the crop rectangle as input,
+ // modifying the opacity of the destination texture, multiplying it by a number between 0 and 1.
+ // We query Layer#getRoundedCornerState() to retrieve the radius as well as the rounded crop
+ // rectangle to figure out how to apply the radius for this layer. The crop rectangle will be
+ // in local layer coordinate space, so we have to take the layer transform into account when
+ // walking up the tree.
+ float roundedCornersRadius = 0.0;
+
+ // Rectangle within which corners will be rounded.
+ FloatRect roundedCornersCrop = FloatRect();
+};
+
+// Descriptor of the source pixels for this layer.
+struct PixelSource {
+ // Source buffer
+ Buffer buffer = Buffer();
+
+ // The solid color with which to fill the layer.
+ // This should only be populated if we don't render from an application
+ // buffer.
+ half3 solidColor = half3(0.0f, 0.0f, 0.0f);
+};
+
+/*
+ * Contains the configuration for the shadows drawn by single layer. Shadow follows
+ * material design guidelines.
+ */
+struct ShadowSettings {
+ // Color of the ambient shadow. The alpha is premultiplied.
+ vec4 ambientColor = vec4();
+
+ // Color of the spot shadow. The alpha is premultiplied. The position of the spot shadow
+ // depends on the light position.
+ vec4 spotColor = vec4();
+
+ // Position of the light source used to cast the spot shadow.
+ vec3 lightPos = vec3();
+
+ // Radius of the spot light source. A smaller radius produces sharper edges;
+ // a larger radius produces softer shadows.
+ float lightRadius = 0.f;
+
+ // Length of the cast shadow. If length is <= 0.f no shadows will be drawn.
+ float length = 0.f;
+
+ // If true, the casting layer is translucent and the shadow needs to fill its bounds.
+ // Otherwise the shadow will only be drawn around the edges of the casting layer.
+ bool casterIsTranslucent = false;
+};
+
+// The settings that RenderEngine requires for correctly rendering a Layer.
+struct LayerSettings {
+ // Geometry information
+ Geometry geometry = Geometry();
+
+ // Source pixels for this layer.
+ PixelSource source = PixelSource();
+
+ // Alpha option to blend with the source pixels
+ half alpha = half(0.0);
+
+ // Color space describing how the source pixels should be interpreted.
+ ui::Dataspace sourceDataspace = ui::Dataspace::UNKNOWN;
+
+ // Additional layer-specific color transform to be applied before the global
+ // transform.
+ mat4 colorTransform = mat4();
+
+ // True if blending will be forced to be disabled.
+ bool disableBlending = false;
+
+ ShadowSettings shadow;
+
+ int backgroundBlurRadius = 0;
+};
+
+// Keep in sync with custom comparison function in
+// compositionengine/impl/ClientCompositionRequestCache.cpp
+static inline bool operator==(const Buffer& lhs, const Buffer& rhs) {
+ return lhs.buffer == rhs.buffer && lhs.fence == rhs.fence &&
+ lhs.textureName == rhs.textureName &&
+ lhs.useTextureFiltering == rhs.useTextureFiltering &&
+ lhs.textureTransform == rhs.textureTransform &&
+ lhs.usePremultipliedAlpha == rhs.usePremultipliedAlpha &&
+ lhs.isOpaque == rhs.isOpaque && lhs.isY410BT2020 == rhs.isY410BT2020 &&
+ lhs.maxMasteringLuminance == rhs.maxMasteringLuminance &&
+ lhs.maxContentLuminance == rhs.maxContentLuminance;
+}
+
+static inline bool operator==(const Geometry& lhs, const Geometry& rhs) {
+ return lhs.boundaries == rhs.boundaries && lhs.positionTransform == rhs.positionTransform &&
+ lhs.roundedCornersRadius == rhs.roundedCornersRadius &&
+ lhs.roundedCornersCrop == rhs.roundedCornersCrop;
+}
+
+static inline bool operator==(const PixelSource& lhs, const PixelSource& rhs) {
+ return lhs.buffer == rhs.buffer && lhs.solidColor == rhs.solidColor;
+}
+
+static inline bool operator==(const ShadowSettings& lhs, const ShadowSettings& rhs) {
+ return lhs.ambientColor == rhs.ambientColor && lhs.spotColor == rhs.spotColor &&
+ lhs.lightPos == rhs.lightPos && lhs.lightRadius == rhs.lightRadius &&
+ lhs.length == rhs.length && lhs.casterIsTranslucent == rhs.casterIsTranslucent;
+}
+
+static inline bool operator==(const LayerSettings& lhs, const LayerSettings& rhs) {
+ return lhs.geometry == rhs.geometry && lhs.source == rhs.source && lhs.alpha == rhs.alpha &&
+ lhs.sourceDataspace == rhs.sourceDataspace &&
+ lhs.colorTransform == rhs.colorTransform &&
+ lhs.disableBlending == rhs.disableBlending && lhs.shadow == rhs.shadow &&
+ lhs.backgroundBlurRadius == rhs.backgroundBlurRadius;
+}
+
+// Defining PrintTo helps with Google Tests.
+
+static inline void PrintTo(const Buffer& settings, ::std::ostream* os) {
+ *os << "Buffer {";
+ *os << "\n .buffer = " << settings.buffer.get();
+ *os << "\n .fence = " << settings.fence.get();
+ *os << "\n .textureName = " << settings.textureName;
+ *os << "\n .useTextureFiltering = " << settings.useTextureFiltering;
+ *os << "\n .textureTransform = " << settings.textureTransform;
+ *os << "\n .usePremultipliedAlpha = " << settings.usePremultipliedAlpha;
+ *os << "\n .isOpaque = " << settings.isOpaque;
+ *os << "\n .isY410BT2020 = " << settings.isY410BT2020;
+ *os << "\n .maxMasteringLuminance = " << settings.maxMasteringLuminance;
+ *os << "\n .maxContentLuminance = " << settings.maxContentLuminance;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const Geometry& settings, ::std::ostream* os) {
+ *os << "Geometry {";
+ *os << "\n .boundaries = ";
+ PrintTo(settings.boundaries, os);
+ *os << "\n .positionTransform = " << settings.positionTransform;
+ *os << "\n .roundedCornersRadius = " << settings.roundedCornersRadius;
+ *os << "\n .roundedCornersCrop = ";
+ PrintTo(settings.roundedCornersCrop, os);
+ *os << "\n}";
+}
+
+static inline void PrintTo(const PixelSource& settings, ::std::ostream* os) {
+ *os << "PixelSource {";
+ *os << "\n .buffer = ";
+ PrintTo(settings.buffer, os);
+ *os << "\n .solidColor = " << settings.solidColor;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const ShadowSettings& settings, ::std::ostream* os) {
+ *os << "ShadowSettings {";
+ *os << "\n .ambientColor = " << settings.ambientColor;
+ *os << "\n .spotColor = " << settings.spotColor;
+ *os << "\n .lightPos = " << settings.lightPos;
+ *os << "\n .lightRadius = " << settings.lightRadius;
+ *os << "\n .length = " << settings.length;
+ *os << "\n .casterIsTranslucent = " << settings.casterIsTranslucent;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const LayerSettings& settings, ::std::ostream* os) {
+ *os << "LayerSettings {";
+ *os << "\n .geometry = ";
+ PrintTo(settings.geometry, os);
+ *os << "\n .source = ";
+ PrintTo(settings.source, os);
+ *os << "\n .alpha = " << settings.alpha;
+ *os << "\n .sourceDataspace = ";
+ PrintTo(settings.sourceDataspace, os);
+ *os << "\n .colorTransform = " << settings.colorTransform;
+ *os << "\n .disableBlending = " << settings.disableBlending;
+ *os << "\n .backgroundBlurRadius = " << settings.backgroundBlurRadius;
+ *os << "\n .shadow = ";
+ PrintTo(settings.shadow, os);
+ *os << "\n}";
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Mesh.h b/media/libstagefright/renderfright/include/renderengine/Mesh.h
new file mode 100644
index 0000000..167f13f
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Mesh.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_MESH_H
+#define SF_RENDER_ENGINE_MESH_H
+
+#include <vector>
+
+#include <stdint.h>
+
+namespace android {
+namespace renderengine {
+
+class Mesh {
+public:
+ class Builder;
+
+ enum Primitive {
+ TRIANGLES = 0x0004, // GL_TRIANGLES
+ TRIANGLE_STRIP = 0x0005, // GL_TRIANGLE_STRIP
+ TRIANGLE_FAN = 0x0006 // GL_TRIANGLE_FAN
+ };
+
+ ~Mesh() = default;
+
+ /*
+ * VertexArray handles the stride automatically.
+ */
+ template <typename TYPE>
+ class VertexArray {
+ friend class Mesh;
+ float* mData;
+ size_t mStride;
+ size_t mOffset = 0;
+ VertexArray(float* data, size_t stride) : mData(data), mStride(stride) {}
+
+ public:
+ // Returns a vertex array at an offset so it's easier to append attributes from
+ // multiple sources.
+ VertexArray(VertexArray<TYPE>& other, size_t offset)
+ : mData(other.mData), mStride(other.mStride), mOffset(offset) {}
+
+ TYPE& operator[](size_t index) {
+ return *reinterpret_cast<TYPE*>(&mData[(index + mOffset) * mStride]);
+ }
+ TYPE const& operator[](size_t index) const {
+ return *reinterpret_cast<TYPE const*>(&mData[(index + mOffset) * mStride]);
+ }
+ };
+
+ template <typename TYPE>
+ VertexArray<TYPE> getPositionArray() {
+ return VertexArray<TYPE>(getPositions(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getTexCoordArray() {
+ return VertexArray<TYPE>(getTexCoords(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getCropCoordArray() {
+ return VertexArray<TYPE>(getCropCoords(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getShadowColorArray() {
+ return VertexArray<TYPE>(getShadowColor(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getShadowParamsArray() {
+ return VertexArray<TYPE>(getShadowParams(), mStride);
+ }
+
+ uint16_t* getIndicesArray() { return getIndices(); }
+
+ Primitive getPrimitive() const;
+
+ // returns a pointer to the vertices positions
+ float const* getPositions() const;
+
+ // returns a pointer to the vertices texture coordinates
+ float const* getTexCoords() const;
+
+ // returns a pointer to the vertices crop coordinates
+ float const* getCropCoords() const;
+
+ // returns a pointer to colors
+ float const* getShadowColor() const;
+
+ // returns a pointer to the shadow params
+ float const* getShadowParams() const;
+
+ // returns a pointer to indices
+ uint16_t const* getIndices() const;
+
+ // number of vertices in this mesh
+ size_t getVertexCount() const;
+
+ // dimension of vertices
+ size_t getVertexSize() const;
+
+ // dimension of texture coordinates
+ size_t getTexCoordsSize() const;
+
+ size_t getShadowParamsSize() const;
+
+ size_t getShadowColorSize() const;
+
+ size_t getIndexCount() const;
+
+ // return stride in bytes
+ size_t getByteStride() const;
+
+ // return stride in floats
+ size_t getStride() const;
+
+private:
+ Mesh(Primitive primitive, size_t vertexCount, size_t vertexSize, size_t texCoordSize,
+ size_t cropCoordsSize, size_t shadowColorSize, size_t shadowParamsSize, size_t indexCount);
+ Mesh(const Mesh&);
+ Mesh& operator=(const Mesh&);
+ Mesh const& operator=(const Mesh&) const;
+
+ float* getPositions();
+ float* getTexCoords();
+ float* getCropCoords();
+ float* getShadowColor();
+ float* getShadowParams();
+ uint16_t* getIndices();
+
+ std::vector<float> mVertices;
+ size_t mVertexCount;
+ size_t mVertexSize;
+ size_t mTexCoordsSize;
+ size_t mCropCoordsSize;
+ size_t mShadowColorSize;
+ size_t mShadowParamsSize;
+ size_t mStride;
+ Primitive mPrimitive;
+ std::vector<uint16_t> mIndices;
+ size_t mIndexCount;
+};
+
+class Mesh::Builder {
+public:
+ Builder& setPrimitive(Primitive primitive) {
+ mPrimitive = primitive;
+ return *this;
+ };
+ Builder& setVertices(size_t vertexCount, size_t vertexSize) {
+ mVertexCount = vertexCount;
+ mVertexSize = vertexSize;
+ return *this;
+ };
+ Builder& setTexCoords(size_t texCoordsSize) {
+ mTexCoordsSize = texCoordsSize;
+ return *this;
+ };
+ Builder& setCropCoords(size_t cropCoordsSize) {
+ mCropCoordsSize = cropCoordsSize;
+ return *this;
+ };
+ Builder& setShadowAttrs() {
+ mShadowParamsSize = 3;
+ mShadowColorSize = 4;
+ return *this;
+ };
+ Builder& setIndices(size_t indexCount) {
+ mIndexCount = indexCount;
+ return *this;
+ };
+ Mesh build() const {
+ return Mesh{mPrimitive, mVertexCount, mVertexSize, mTexCoordsSize,
+ mCropCoordsSize, mShadowColorSize, mShadowParamsSize, mIndexCount};
+ }
+
+private:
+ size_t mVertexCount = 0;
+ size_t mVertexSize = 0;
+ size_t mTexCoordsSize = 0;
+ size_t mCropCoordsSize = 0;
+ size_t mShadowColorSize = 0;
+ size_t mShadowParamsSize = 0;
+ size_t mIndexCount = 0;
+ Primitive mPrimitive;
+};
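+
+// Illustrative (not part of this change): building a small textured quad and filling
+// its position array; `vec2` from math/vec2.h is assumed for the example.
+//   Mesh mesh = Mesh::Builder()
+//                       .setPrimitive(Mesh::TRIANGLE_FAN)
+//                       .setVertices(4 /* count */, 2 /* size */)
+//                       .setTexCoords(2 /* size */)
+//                       .build();
+//   Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
+//   position[0] = vec2(0.0f, 0.0f);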
+
+} // namespace renderengine
+} // namespace android
+#endif /* SF_RENDER_ENGINE_MESH_H */
diff --git a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
new file mode 100644
index 0000000..09a0f65
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDERENGINE_H_
+#define SF_RENDERENGINE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <memory>
+
+#include <android-base/unique_fd.h>
+#include <math/mat4.h>
+#include <renderengine/DisplaySettings.h>
+#include <renderengine/Framebuffer.h>
+#include <renderengine/Image.h>
+#include <renderengine/LayerSettings.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Transform.h>
+
+/**
+ * Allows setting the RenderEngine backend to GLES (default) or Vulkan (NOT yet supported).
+ */
+#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.renderengine.backend"
+
+struct ANativeWindowBuffer;
+
+namespace android {
+
+class Rect;
+class Region;
+
+namespace renderengine {
+
+class BindNativeBufferAsFramebuffer;
+class Image;
+class Mesh;
+class Texture;
+struct RenderEngineCreationArgs;
+
+namespace threaded {
+class RenderEngineThreaded;
+}
+
+namespace impl {
+class RenderEngine;
+}
+
+enum class Protection {
+ UNPROTECTED = 1,
+ PROTECTED = 2,
+};
+
+class RenderEngine {
+public:
+ enum class ContextPriority {
+ LOW = 1,
+ MEDIUM = 2,
+ HIGH = 3,
+ };
+
+ enum class RenderEngineType {
+ GLES = 1,
+ THREADED = 2,
+ };
+
+ static std::unique_ptr<RenderEngine> create(const RenderEngineCreationArgs& args);
+
+ virtual ~RenderEngine() = 0;
+
+ // ----- BEGIN DEPRECATED INTERFACE -----
+ // This interface, while still in use until a suitable replacement is built,
+ // should be considered deprecated, apart from a few methods that may still be
+ // used to support legacy behavior.
+ virtual void primeCache() const = 0;
+
+ // dump the extension strings. always call the base class.
+ virtual void dump(std::string& result) = 0;
+
+ virtual bool useNativeFenceSync() const = 0;
+ virtual bool useWaitSync() const = 0;
+ virtual void genTextures(size_t count, uint32_t* names) = 0;
+ virtual void deleteTextures(size_t count, uint32_t const* names) = 0;
+ virtual void bindExternalTextureImage(uint32_t texName, const Image& image) = 0;
+ // Legacy public method used by devices that don't support native fence
+ // synchronization in their GPU driver, as this method provides implicit
+ // synchronization for latching buffers.
+ virtual status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) = 0;
+ // Caches Image resources for this buffer, but does not bind the buffer to
+ // a particular texture.
+ // Note that work is deferred to an additional thread, i.e. this call
+ // is made asynchronously, but the caller can expect that cache/unbind calls
+ // are performed in a manner that's conflict serializable, i.e. unbinding
+ // a buffer should never occur before binding the buffer if the caller
+ // called {bind, cache}ExternalTextureBuffer before calling unbind.
+ virtual void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) = 0;
+ // Removes internal resources referenced by the bufferId. This method should be
+ // invoked when the caller will no longer hold a reference to a GraphicBuffer
+ // and needs to clean up its resources.
+ // Note that work is deferred to an additional thread, i.e. this call
+ // is made asynchronously, but the caller can expect that cache/unbind calls
+ // are performed in a manner that's conflict serializable, i.e. unbinding
+ // a buffer should never occur before binding the buffer if the caller
+ // called {bind, cache}ExternalTextureBuffer before calling unbind.
+ virtual void unbindExternalTextureBuffer(uint64_t bufferId) = 0;
+ // When binding a native buffer, it must be done before setViewportAndProjection
+ // Returns NO_ERROR when the buffer binds successfully, or NO_MEMORY when there is no memory for the allocation.
+ virtual status_t bindFrameBuffer(Framebuffer* framebuffer) = 0;
+ virtual void unbindFrameBuffer(Framebuffer* framebuffer) = 0;
+
+ enum class CleanupMode {
+ CLEAN_OUTPUT_RESOURCES,
+ CLEAN_ALL,
+ };
+ // Clean-up method that should be called on the main thread after the
+ // drawFence returned by drawLayers fires. This method will free up
+ // resources used by the most recently drawn frame. If the frame is still
+ // being drawn, then this call is silently ignored.
+ //
+ // If mode is CLEAN_OUTPUT_RESOURCES, then only resources related to the
+ // output framebuffer are cleaned up, including the sibling texture.
+ //
+ // If mode is CLEAN_ALL, then we also cleanup resources related to any input
+ // buffers.
+ //
+ // Returns true if resources were cleaned up, and false if we didn't need to
+ // do any work.
+ virtual bool cleanupPostRender(CleanupMode mode = CleanupMode::CLEAN_OUTPUT_RESOURCES) = 0;
+
+ // queries
+ virtual size_t getMaxTextureSize() const = 0;
+ virtual size_t getMaxViewportDims() const = 0;
+
+ // ----- END DEPRECATED INTERFACE -----
+
+ // ----- BEGIN NEW INTERFACE -----
+
+ virtual bool isProtected() const = 0;
+ virtual bool supportsProtectedContent() const = 0;
+ virtual bool useProtectedContext(bool useProtectedContext) = 0;
+
+ // Renders layers for a particular display via GPU composition. This method
+ // should be called for every display that needs to be rendered via the GPU.
+ // @param display The display-wide settings that should be applied prior to
+ // drawing any layers.
+ //
+ // Assumptions when calling this method:
+ // 1. There is exactly one caller - i.e. multi-threading is not supported.
+ // 2. Additional threads may be calling the {bind,cache}ExternalTexture
+ // methods above. But the main thread is responsible for holding resources
+ // such that Image destruction does not occur while this method is called.
+ //
+ // TODO(b/136806342): This behavior should ideally be fixed since
+ // the above two assumptions are brittle, as conditional thread safety
+ // may be insufficient when maximizing rendering performance in the future.
+ //
+ // @param layers The layers to draw onto the display, in Z-order.
+ // @param buffer The buffer which will be drawn to. This buffer will be
+ // ready once drawFence fires.
+ // @param useFramebufferCache True if the framebuffer cache should be used.
+ // If an implementation does not cache output framebuffers, then this
+ // parameter does nothing.
+ // @param bufferFence Fence signalling that the buffer is ready to be drawn
+ // to.
+ // @param drawFence A pointer to a fence, which will fire when the buffer
+ // has been drawn to and is ready to be examined. The fence will be
+ // initialized by this method. The caller will be responsible for owning the
+ // fence.
+ // @return An error code indicating whether drawing was successful. For
+ // now, this always returns NO_ERROR.
+ virtual status_t drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
+ base::unique_fd&& bufferFence, base::unique_fd* drawFence) = 0;
+
+protected:
+ // Gets a framebuffer to render to. This framebuffer may or may not be
+ // cached depending on the implementation.
+ //
+ // Note that this method does not transfer ownership, so the caller must not
+ // live longer than RenderEngine.
+ virtual Framebuffer* getFramebufferForDrawing() = 0;
+ friend class BindNativeBufferAsFramebuffer;
+ friend class threaded::RenderEngineThreaded;
+};
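As a minimal caller-side sketch of the drawLayers/cleanupPostRender flow documented above (the engine, displaySettings, layers, and outputBuffer names are illustrative placeholders, not part of this change):

    // Hedged sketch: one GPU composition pass followed by post-render cleanup.
    base::unique_fd drawFence;
    status_t err = engine->drawLayers(displaySettings, layers, outputBuffer,
                                      /*useFramebufferCache=*/true,
                                      base::unique_fd(), // no buffer fence to wait on
                                      &drawFence);
    if (err == NO_ERROR && drawFence.get() >= 0) {
        sync_wait(drawFence.get(), -1); // block until the output buffer is ready
    }
    // After the fence fires, release per-frame resources on the main thread.
    engine->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES);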
+
+struct RenderEngineCreationArgs {
+ int pixelFormat;
+ uint32_t imageCacheSize;
+ bool useColorManagement;
+ bool enableProtectedContext;
+ bool precacheToneMapperShaderOnly;
+ bool supportsBackgroundBlur;
+ RenderEngine::ContextPriority contextPriority;
+ RenderEngine::RenderEngineType renderEngineType;
+
+ struct Builder;
+
+private:
+ // must be created by Builder via constructor with full argument list
+ RenderEngineCreationArgs(int _pixelFormat, uint32_t _imageCacheSize, bool _useColorManagement,
+ bool _enableProtectedContext, bool _precacheToneMapperShaderOnly,
+ bool _supportsBackgroundBlur,
+ RenderEngine::ContextPriority _contextPriority,
+ RenderEngine::RenderEngineType _renderEngineType)
+ : pixelFormat(_pixelFormat),
+ imageCacheSize(_imageCacheSize),
+ useColorManagement(_useColorManagement),
+ enableProtectedContext(_enableProtectedContext),
+ precacheToneMapperShaderOnly(_precacheToneMapperShaderOnly),
+ supportsBackgroundBlur(_supportsBackgroundBlur),
+ contextPriority(_contextPriority),
+ renderEngineType(_renderEngineType) {}
+ RenderEngineCreationArgs() = delete;
+};
+
+struct RenderEngineCreationArgs::Builder {
+ Builder() {}
+
+ Builder& setPixelFormat(int pixelFormat) {
+ this->pixelFormat = pixelFormat;
+ return *this;
+ }
+ Builder& setImageCacheSize(uint32_t imageCacheSize) {
+ this->imageCacheSize = imageCacheSize;
+ return *this;
+ }
+ Builder& setUseColorManagerment(bool useColorManagement) {
+ this->useColorManagement = useColorManagement;
+ return *this;
+ }
+ Builder& setEnableProtectedContext(bool enableProtectedContext) {
+ this->enableProtectedContext = enableProtectedContext;
+ return *this;
+ }
+ Builder& setPrecacheToneMapperShaderOnly(bool precacheToneMapperShaderOnly) {
+ this->precacheToneMapperShaderOnly = precacheToneMapperShaderOnly;
+ return *this;
+ }
+ Builder& setSupportsBackgroundBlur(bool supportsBackgroundBlur) {
+ this->supportsBackgroundBlur = supportsBackgroundBlur;
+ return *this;
+ }
+ Builder& setContextPriority(RenderEngine::ContextPriority contextPriority) {
+ this->contextPriority = contextPriority;
+ return *this;
+ }
+ Builder& setRenderEngineType(RenderEngine::RenderEngineType renderEngineType) {
+ this->renderEngineType = renderEngineType;
+ return *this;
+ }
+ RenderEngineCreationArgs build() const {
+ return RenderEngineCreationArgs(pixelFormat, imageCacheSize, useColorManagement,
+ enableProtectedContext, precacheToneMapperShaderOnly,
+ supportsBackgroundBlur, contextPriority, renderEngineType);
+ }
+
+private:
+ // 1 means RGBA_8888
+ int pixelFormat = 1;
+ uint32_t imageCacheSize = 0;
+ bool useColorManagement = true;
+ bool enableProtectedContext = false;
+ bool precacheToneMapperShaderOnly = false;
+ bool supportsBackgroundBlur = false;
+ RenderEngine::ContextPriority contextPriority = RenderEngine::ContextPriority::MEDIUM;
+ RenderEngine::RenderEngineType renderEngineType = RenderEngine::RenderEngineType::GLES;
+};
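For reference, the builder is meant to be consumed in a single chained expression; the values below are illustrative and mirror the test setup added later in this change:

    renderengine::RenderEngineCreationArgs args =
            renderengine::RenderEngineCreationArgs::Builder()
                    .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
                    .setImageCacheSize(1)
                    .setEnableProtectedContext(false)
                    .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
                    .setRenderEngineType(renderengine::RenderEngine::RenderEngineType::GLES)
                    .build();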
+
+class BindNativeBufferAsFramebuffer {
+public:
+ BindNativeBufferAsFramebuffer(RenderEngine& engine, ANativeWindowBuffer* buffer,
+ const bool useFramebufferCache)
+ : mEngine(engine), mFramebuffer(mEngine.getFramebufferForDrawing()), mStatus(NO_ERROR) {
+ mStatus = mFramebuffer->setNativeWindowBuffer(buffer, mEngine.isProtected(),
+ useFramebufferCache)
+ ? mEngine.bindFrameBuffer(mFramebuffer)
+ : NO_MEMORY;
+ }
+ ~BindNativeBufferAsFramebuffer() {
+ mFramebuffer->setNativeWindowBuffer(nullptr, false, /*arbitrary*/ true);
+ mEngine.unbindFrameBuffer(mFramebuffer);
+ }
+ status_t getStatus() const { return mStatus; }
+
+private:
+ RenderEngine& mEngine;
+ Framebuffer* mFramebuffer;
+ status_t mStatus;
+};
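BindNativeBufferAsFramebuffer is an RAII guard: construction binds the buffer as the current render target and destruction unbinds it. A hedged usage sketch, where engine and nativeBuffer are placeholder names:

    {
        renderengine::BindNativeBufferAsFramebuffer bind(engine, nativeBuffer,
                                                         /*useFramebufferCache=*/true);
        if (bind.getStatus() != NO_ERROR) {
            return bind.getStatus(); // the output buffer could not be bound
        }
        // ... issue draw calls against the bound framebuffer ...
    } // destructor unbinds the framebuffer here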
+
+namespace impl {
+
+// impl::RenderEngine contains common implementation that is graphics back-end agnostic.
+class RenderEngine : public renderengine::RenderEngine {
+public:
+ virtual ~RenderEngine() = 0;
+
+ bool useNativeFenceSync() const override;
+ bool useWaitSync() const override;
+
+protected:
+ RenderEngine(const RenderEngineCreationArgs& args);
+ const RenderEngineCreationArgs mArgs;
+};
+
+} // namespace impl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDERENGINE_H_ */
diff --git a/media/libstagefright/renderfright/include/renderengine/Texture.h b/media/libstagefright/renderfright/include/renderengine/Texture.h
new file mode 100644
index 0000000..c69ace0
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Texture.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_TEXTURE_H
+#define SF_RENDER_ENGINE_TEXTURE_H
+
+#include <stdint.h>
+
+#include <math/mat4.h>
+
+namespace android {
+namespace renderengine {
+
+class Texture {
+public:
+ enum Target { TEXTURE_2D = 0x0DE1, TEXTURE_EXTERNAL = 0x8D65 };
+
+ Texture();
+ Texture(Target textureTarget, uint32_t textureName);
+ ~Texture();
+
+ void init(Target textureTarget, uint32_t textureName);
+
+ void setMatrix(float const* matrix);
+ void setFiltering(bool enabled);
+ void setDimensions(size_t width, size_t height);
+
+ uint32_t getTextureName() const;
+ uint32_t getTextureTarget() const;
+
+ const mat4& getMatrix() const;
+ bool getFiltering() const;
+ size_t getWidth() const;
+ size_t getHeight() const;
+
+private:
+ uint32_t mTextureName;
+ uint32_t mTextureTarget;
+ size_t mWidth;
+ size_t mHeight;
+ bool mFiltering;
+ mat4 mTextureMatrix;
+};
+
+} // namespace renderengine
+} // namespace android
+#endif /* SF_RENDER_ENGINE_TEXTURE_H */
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h b/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h
new file mode 100644
index 0000000..dfb6a4e
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/Framebuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class Framebuffer : public renderengine::Framebuffer {
+public:
+ Framebuffer();
+ ~Framebuffer() override;
+
+ MOCK_METHOD3(setNativeWindowBuffer, bool(ANativeWindowBuffer*, bool, const bool));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/Image.h b/media/libstagefright/renderfright/include/renderengine/mock/Image.h
new file mode 100644
index 0000000..2b0eed1
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/Image.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/Image.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class Image : public renderengine::Image {
+public:
+ Image();
+ ~Image() override;
+
+ MOCK_METHOD2(setNativeWindowBuffer, bool(ANativeWindowBuffer* buffer, bool isProtected));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h
new file mode 100644
index 0000000..e03dd58
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/DisplaySettings.h>
+#include <renderengine/LayerSettings.h>
+#include <renderengine/Mesh.h>
+#include <renderengine/RenderEngine.h>
+#include <renderengine/Texture.h>
+#include <ui/Fence.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/Region.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class RenderEngine : public renderengine::RenderEngine {
+public:
+ RenderEngine();
+ ~RenderEngine() override;
+
+ MOCK_METHOD0(getFramebufferForDrawing, Framebuffer*());
+ MOCK_CONST_METHOD0(primeCache, void());
+ MOCK_METHOD1(dump, void(std::string&));
+ MOCK_CONST_METHOD0(useNativeFenceSync, bool());
+ MOCK_CONST_METHOD0(useWaitSync, bool());
+ MOCK_CONST_METHOD0(isCurrent, bool());
+ MOCK_METHOD2(genTextures, void(size_t, uint32_t*));
+ MOCK_METHOD2(deleteTextures, void(size_t, uint32_t const*));
+ MOCK_METHOD2(bindExternalTextureImage, void(uint32_t, const renderengine::Image&));
+ MOCK_METHOD1(cacheExternalTextureBuffer, void(const sp<GraphicBuffer>&));
+ MOCK_METHOD3(bindExternalTextureBuffer,
+ status_t(uint32_t, const sp<GraphicBuffer>&, const sp<Fence>&));
+ MOCK_METHOD1(unbindExternalTextureBuffer, void(uint64_t));
+ MOCK_METHOD1(bindFrameBuffer, status_t(renderengine::Framebuffer*));
+ MOCK_METHOD1(unbindFrameBuffer, void(renderengine::Framebuffer*));
+ MOCK_METHOD1(drawMesh, void(const renderengine::Mesh&));
+ MOCK_CONST_METHOD0(getMaxTextureSize, size_t());
+ MOCK_CONST_METHOD0(getMaxViewportDims, size_t());
+ MOCK_CONST_METHOD0(isProtected, bool());
+ MOCK_CONST_METHOD0(supportsProtectedContent, bool());
+ MOCK_METHOD1(useProtectedContext, bool(bool));
+ MOCK_METHOD1(cleanupPostRender, bool(CleanupMode mode));
+ MOCK_METHOD6(drawLayers,
+ status_t(const DisplaySettings&, const std::vector<const LayerSettings*>&,
+ const sp<GraphicBuffer>&, const bool, base::unique_fd&&,
+ base::unique_fd*));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
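In unit tests the mock is driven with ordinary Google Mock expectations; a minimal, hypothetical snippet (not part of this change):

    using ::testing::Return;

    renderengine::mock::RenderEngine engine;
    EXPECT_CALL(engine, supportsProtectedContent()).WillOnce(Return(false));
    EXPECT_CALL(engine, getMaxTextureSize()).WillRepeatedly(Return(4096));
    // The code under test now exercises this mock instead of a real GLES backend.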
diff --git a/media/libstagefright/renderfright/include/renderengine/private/Description.h b/media/libstagefright/renderfright/include/renderengine/private/Description.h
new file mode 100644
index 0000000..a62161a
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/private/Description.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_DESCRIPTION_H_
+#define SF_RENDER_ENGINE_DESCRIPTION_H_
+
+#include <renderengine/Texture.h>
+#include <ui/GraphicTypes.h>
+
+namespace android {
+namespace renderengine {
+
+/*
+ * This is the structure that holds the state of the rendering engine.
+ * This class is used to generate a corresponding GLSL program and set the
+ * appropriate uniform.
+ */
+struct Description {
+ enum class TransferFunction : int {
+ LINEAR,
+ SRGB,
+ ST2084,
+ HLG, // Hybrid Log-Gamma for HDR.
+ };
+
+ static TransferFunction dataSpaceToTransferFunction(ui::Dataspace dataSpace);
+
+ Description() = default;
+ ~Description() = default;
+
+ bool hasInputTransformMatrix() const;
+ bool hasOutputTransformMatrix() const;
+ bool hasColorMatrix() const;
+
+ // whether textures are premultiplied
+ bool isPremultipliedAlpha = false;
+ // whether this layer is marked as opaque
+ bool isOpaque = true;
+
+ // corner radius of the layer
+ float cornerRadius = 0;
+
+ // Size of the rounded rectangle we are cropping to
+ half2 cropSize;
+
+ // Texture this layer uses
+ Texture texture;
+ bool textureEnabled = false;
+
+ // color used when texturing is disabled or when setting alpha.
+ half4 color;
+
+ // true if the sampled pixel values are in Y410/BT2020 rather than RGBA
+ bool isY410BT2020 = false;
+
+ // transfer functions for the input/output
+ TransferFunction inputTransferFunction = TransferFunction::LINEAR;
+ TransferFunction outputTransferFunction = TransferFunction::LINEAR;
+
+ float displayMaxLuminance;
+ float maxMasteringLuminance;
+ float maxContentLuminance;
+
+ // projection matrix
+ mat4 projectionMatrix;
+
+ // The color matrix will be applied in linear space right before OETF.
+ mat4 colorMatrix;
+ mat4 inputTransformMatrix;
+ mat4 outputTransformMatrix;
+
+ // True if this layer will draw a shadow.
+ bool drawShadows = false;
+};
+
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_DESCRIPTION_H_ */
diff --git a/media/libstagefright/renderfright/mock/Framebuffer.cpp b/media/libstagefright/renderfright/mock/Framebuffer.cpp
new file mode 100644
index 0000000..fbdcaab
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/Framebuffer.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/Framebuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+Framebuffer::Framebuffer() = default;
+Framebuffer::~Framebuffer() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/mock/Image.cpp b/media/libstagefright/renderfright/mock/Image.cpp
new file mode 100644
index 0000000..57f4346
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/Image.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/Image.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+Image::Image() = default;
+Image::~Image() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/mock/RenderEngine.cpp b/media/libstagefright/renderfright/mock/RenderEngine.cpp
new file mode 100644
index 0000000..261636d
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/RenderEngine.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/RenderEngine.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+RenderEngine::RenderEngine() = default;
+RenderEngine::~RenderEngine() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/tests/Android.bp b/media/libstagefright/renderfright/tests/Android.bp
new file mode 100644
index 0000000..9fee646
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/Android.bp
@@ -0,0 +1,41 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_test {
+ name: "librenderfright_test",
+ defaults: ["surfaceflinger_defaults"],
+ test_suites: ["device-tests"],
+ srcs: [
+ "RenderEngineTest.cpp",
+ "RenderEngineThreadedTest.cpp",
+ ],
+ static_libs: [
+ "libgmock",
+ "librenderfright",
+ "librenderfright_mocks",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libEGL",
+ "libGLESv2",
+ "libgui",
+ "liblog",
+ "libnativewindow",
+ "libprocessgroup",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+}
diff --git a/media/libstagefright/renderfright/tests/RenderEngineTest.cpp b/media/libstagefright/renderfright/tests/RenderEngineTest.cpp
new file mode 100644
index 0000000..730f606
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/RenderEngineTest.cpp
@@ -0,0 +1,1469 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO(b/129481165): remove the #pragma below and fix conversion issues
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wconversion"
+
+#include <chrono>
+#include <condition_variable>
+#include <fstream>
+
+#include <cutils/properties.h>
+#include <gtest/gtest.h>
+#include <renderengine/RenderEngine.h>
+#include <sync/sync.h>
+#include <ui/PixelFormat.h>
+#include "../gl/GLESRenderEngine.h"
+#include "../threaded/RenderEngineThreaded.h"
+
+constexpr int DEFAULT_DISPLAY_WIDTH = 128;
+constexpr int DEFAULT_DISPLAY_HEIGHT = 256;
+constexpr int DEFAULT_DISPLAY_OFFSET = 64;
+constexpr bool WRITE_BUFFER_TO_FILE_ON_FAILURE = false;
+
+namespace android {
+
+struct RenderEngineTest : public ::testing::Test {
+ static void SetUpTestSuite() {
+ sRE = renderengine::gl::GLESRenderEngine::create(
+ renderengine::RenderEngineCreationArgs::Builder()
+ .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
+ .setImageCacheSize(1)
+ .setUseColorManagerment(false)
+ .setEnableProtectedContext(false)
+ .setPrecacheToneMapperShaderOnly(false)
+ .setSupportsBackgroundBlur(true)
+ .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
+ .setRenderEngineType(renderengine::RenderEngine::RenderEngineType::GLES)
+ .build());
+ }
+
+ static void TearDownTestSuite() {
+ // The ordering here is important - sCurrentBuffer must live longer
+ // than RenderEngine to avoid a null reference on tear-down.
+ sRE = nullptr;
+ sCurrentBuffer = nullptr;
+ }
+
+ static sp<GraphicBuffer> allocateDefaultBuffer() {
+ return new GraphicBuffer(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT,
+ HAL_PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_RENDER,
+ "output");
+ }
+
+ // Allocates a buffer of the given dimensions to fill with a solid color.
+ static sp<GraphicBuffer> allocateSourceBuffer(uint32_t width, uint32_t height) {
+ return new GraphicBuffer(width, height, HAL_PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_TEXTURE,
+ "input");
+ }
+
+ RenderEngineTest() { mBuffer = allocateDefaultBuffer(); }
+
+ ~RenderEngineTest() {
+ if (WRITE_BUFFER_TO_FILE_ON_FAILURE && ::testing::Test::HasFailure()) {
+ writeBufferToFile("/data/texture_out_");
+ }
+ for (uint32_t texName : mTexNames) {
+ sRE->deleteTextures(1, &texName);
+ EXPECT_FALSE(sRE->isTextureNameKnownForTesting(texName));
+ }
+ }
+
+ void writeBufferToFile(const char* basename) {
+ std::string filename(basename);
+ filename.append(::testing::UnitTest::GetInstance()->current_test_info()->name());
+ filename.append(".ppm");
+ std::ofstream file(filename.c_str(), std::ios::binary);
+ if (!file.is_open()) {
+ ALOGE("Unable to open file: %s", filename.c_str());
+ ALOGE("You may need to do: \"adb shell setenforce 0\" to enable "
+ "surfaceflinger to write debug images");
+ return;
+ }
+
+ uint8_t* pixels;
+ mBuffer->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+
+ file << "P6\n";
+ file << mBuffer->getWidth() << "\n";
+ file << mBuffer->getHeight() << "\n";
+ file << 255 << "\n";
+
+ std::vector<uint8_t> outBuffer(mBuffer->getWidth() * mBuffer->getHeight() * 3);
+ auto outPtr = reinterpret_cast<uint8_t*>(outBuffer.data());
+
+ for (int32_t j = 0; j < mBuffer->getHeight(); j++) {
+ const uint8_t* src = pixels + (mBuffer->getStride() * j) * 4;
+ for (int32_t i = 0; i < mBuffer->getWidth(); i++) {
+ // Only copy R, G and B components
+ outPtr[0] = src[0];
+ outPtr[1] = src[1];
+ outPtr[2] = src[2];
+ outPtr += 3;
+
+ src += 4;
+ }
+ }
+ file.write(reinterpret_cast<char*>(outBuffer.data()), outBuffer.size());
+ mBuffer->unlock();
+ }
+
+ void expectBufferColor(const Region& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
+ size_t c;
+ Rect const* rect = region.getArray(&c);
+ for (size_t i = 0; i < c; i++, rect++) {
+ expectBufferColor(*rect, r, g, b, a);
+ }
+ }
+
+ void expectBufferColor(const Rect& rect, uint8_t r, uint8_t g, uint8_t b, uint8_t a,
+ uint8_t tolerance = 0) {
+ auto colorCompare = [tolerance](const uint8_t* colorA, const uint8_t* colorB) {
+ auto colorBitCompare = [tolerance](uint8_t a, uint8_t b) {
+ uint8_t tmp = a >= b ? a - b : b - a;
+ return tmp <= tolerance;
+ };
+ return std::equal(colorA, colorA + 4, colorB, colorBitCompare);
+ };
+
+ expectBufferColor(rect, r, g, b, a, colorCompare);
+ }
+
+ void expectBufferColor(const Rect& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a,
+ std::function<bool(const uint8_t* a, const uint8_t* b)> colorCompare) {
+ uint8_t* pixels;
+ mBuffer->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ int32_t maxFails = 10;
+ int32_t fails = 0;
+ for (int32_t j = 0; j < region.getHeight(); j++) {
+ const uint8_t* src =
+ pixels + (mBuffer->getStride() * (region.top + j) + region.left) * 4;
+ for (int32_t i = 0; i < region.getWidth(); i++) {
+ const uint8_t expected[4] = {r, g, b, a};
+ bool equal = colorCompare(src, expected);
+ EXPECT_TRUE(equal)
+ << "pixel @ (" << region.left + i << ", " << region.top + j << "): "
+ << "expected (" << static_cast<uint32_t>(r) << ", "
+ << static_cast<uint32_t>(g) << ", " << static_cast<uint32_t>(b) << ", "
+ << static_cast<uint32_t>(a) << "), "
+ << "got (" << static_cast<uint32_t>(src[0]) << ", "
+ << static_cast<uint32_t>(src[1]) << ", " << static_cast<uint32_t>(src[2])
+ << ", " << static_cast<uint32_t>(src[3]) << ")";
+ src += 4;
+ if (!equal && ++fails >= maxFails) {
+ break;
+ }
+ }
+ if (fails >= maxFails) {
+ break;
+ }
+ }
+ mBuffer->unlock();
+ }
+
+ void expectAlpha(const Rect& rect, uint8_t a) {
+ auto colorCompare = [](const uint8_t* colorA, const uint8_t* colorB) {
+ return colorA[3] == colorB[3];
+ };
+ expectBufferColor(rect, 0.0f /* r */, 0.0f /*g */, 0.0f /* b */, a, colorCompare);
+ }
+
+ void expectShadowColor(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow, const ubyte4& casterColor,
+ const ubyte4& backgroundColor) {
+ const Rect casterRect(castingLayer.geometry.boundaries);
+ Region casterRegion = Region(casterRect);
+ const float casterCornerRadius = castingLayer.geometry.roundedCornersRadius;
+ if (casterCornerRadius > 0.0f) {
+ // ignore the corners if a corner radius is set
+ Rect cornerRect(casterCornerRadius, casterCornerRadius);
+ casterRegion.subtractSelf(cornerRect.offsetTo(casterRect.left, casterRect.top));
+ casterRegion.subtractSelf(
+ cornerRect.offsetTo(casterRect.right - casterCornerRadius, casterRect.top));
+ casterRegion.subtractSelf(
+ cornerRect.offsetTo(casterRect.left, casterRect.bottom - casterCornerRadius));
+ casterRegion.subtractSelf(cornerRect.offsetTo(casterRect.right - casterCornerRadius,
+ casterRect.bottom - casterCornerRadius));
+ }
+
+ const float shadowInset = shadow.length * -1.0f;
+ const Rect casterWithShadow =
+ Rect(casterRect).inset(shadowInset, shadowInset, shadowInset, shadowInset);
+ const Region shadowRegion = Region(casterWithShadow).subtractSelf(casterRect);
+ const Region backgroundRegion = Region(fullscreenRect()).subtractSelf(casterWithShadow);
+
+ // verify casting layer
+ expectBufferColor(casterRegion, casterColor.r, casterColor.g, casterColor.b, casterColor.a);
+
+ // verify shadows by testing just the alpha since it's difficult to validate the shadow color
+ size_t c;
+ Rect const* r = shadowRegion.getArray(&c);
+ for (size_t i = 0; i < c; i++, r++) {
+ expectAlpha(*r, 255);
+ }
+
+ // verify background
+ expectBufferColor(backgroundRegion, backgroundColor.r, backgroundColor.g, backgroundColor.b,
+ backgroundColor.a);
+ }
+
+ static renderengine::ShadowSettings getShadowSettings(const vec2& casterPos, float shadowLength,
+ bool casterIsTranslucent) {
+ renderengine::ShadowSettings shadow;
+ shadow.ambientColor = {0.0f, 0.0f, 0.0f, 0.039f};
+ shadow.spotColor = {0.0f, 0.0f, 0.0f, 0.19f};
+ shadow.lightPos = vec3(casterPos.x, casterPos.y, 0);
+ shadow.lightRadius = 0.0f;
+ shadow.length = shadowLength;
+ shadow.casterIsTranslucent = casterIsTranslucent;
+ return shadow;
+ }
+
+ static Rect fullscreenRect() { return Rect(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT); }
+
+ static Rect offsetRect() {
+ return Rect(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT);
+ }
+
+ static Rect offsetRectAtZero() {
+ return Rect(DEFAULT_DISPLAY_WIDTH - DEFAULT_DISPLAY_OFFSET,
+ DEFAULT_DISPLAY_HEIGHT - DEFAULT_DISPLAY_OFFSET);
+ }
+
+ void invokeDraw(renderengine::DisplaySettings settings,
+ std::vector<const renderengine::LayerSettings*> layers,
+ sp<GraphicBuffer> buffer) {
+ base::unique_fd fence;
+ status_t status =
+ sRE->drawLayers(settings, layers, buffer, true, base::unique_fd(), &fence);
+ sCurrentBuffer = buffer;
+
+ int fd = fence.release();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ close(fd);
+ }
+
+ ASSERT_EQ(NO_ERROR, status);
+ if (layers.size() > 0) {
+ ASSERT_TRUE(sRE->isFramebufferImageCachedForTesting(buffer->getId()));
+ }
+ }
+
+ void drawEmptyLayers() {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ // Meaningless buffer since we don't do any drawing
+ sp<GraphicBuffer> buffer = new GraphicBuffer();
+ invokeDraw(settings, layers, buffer);
+ }
+
+ template <typename SourceVariant>
+ void fillBuffer(half r, half g, half b, half a);
+
+ template <typename SourceVariant>
+ void fillRedBuffer();
+
+ template <typename SourceVariant>
+ void fillGreenBuffer();
+
+ template <typename SourceVariant>
+ void fillBlueBuffer();
+
+ template <typename SourceVariant>
+ void fillRedTransparentBuffer();
+
+ template <typename SourceVariant>
+ void fillRedOffsetBuffer();
+
+ template <typename SourceVariant>
+ void fillBufferPhysicalOffset();
+
+ template <typename SourceVariant>
+ void fillBufferCheckers(uint32_t rotation);
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate0();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate90();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate180();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate270();
+
+ template <typename SourceVariant>
+ void fillBufferWithLayerTransform();
+
+ template <typename SourceVariant>
+ void fillBufferLayerTransform();
+
+ template <typename SourceVariant>
+ void fillBufferWithColorTransform();
+
+ template <typename SourceVariant>
+ void fillBufferColorTransform();
+
+ template <typename SourceVariant>
+ void fillRedBufferWithRoundedCorners();
+
+ template <typename SourceVariant>
+ void fillBufferWithRoundedCorners();
+
+ template <typename SourceVariant>
+ void fillBufferAndBlurBackground();
+
+ template <typename SourceVariant>
+ void overlayCorners();
+
+ void fillRedBufferTextureTransform();
+
+ void fillBufferTextureTransform();
+
+ void fillRedBufferWithPremultiplyAlpha();
+
+ void fillBufferWithPremultiplyAlpha();
+
+ void fillRedBufferWithoutPremultiplyAlpha();
+
+ void fillBufferWithoutPremultiplyAlpha();
+
+ void fillGreenColorBufferThenClearRegion();
+
+ void clearLeftRegion();
+
+ void clearRegion();
+
+ template <typename SourceVariant>
+ void drawShadow(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow, const ubyte4& casterColor,
+ const ubyte4& backgroundColor);
+
+ // Keep around the same renderengine object to save on initialization time.
+ // For now, exercise the GL backend directly so that some caching specifics
+ // can be tested without changing the interface.
+ static std::unique_ptr<renderengine::gl::GLESRenderEngine> sRE;
+ // Hack to avoid NPE in the EGL driver: the GraphicBuffer needs to
+ // be freed *after* RenderEngine is destroyed, so that the EGL image is
+ // destroyed first.
+ static sp<GraphicBuffer> sCurrentBuffer;
+
+ sp<GraphicBuffer> mBuffer;
+
+ std::vector<uint32_t> mTexNames;
+};
+
+std::unique_ptr<renderengine::gl::GLESRenderEngine> RenderEngineTest::sRE = nullptr;
+sp<GraphicBuffer> RenderEngineTest::sCurrentBuffer = nullptr;
+
+struct ColorSourceVariant {
+ static void fillColor(renderengine::LayerSettings& layer, half r, half g, half b,
+ RenderEngineTest* /*fixture*/) {
+ layer.source.solidColor = half3(r, g, b);
+ }
+};
+
+struct RelaxOpaqueBufferVariant {
+ static void setOpaqueBit(renderengine::LayerSettings& layer) {
+ layer.source.buffer.isOpaque = false;
+ }
+
+ static uint8_t getAlphaChannel() { return 255; }
+};
+
+struct ForceOpaqueBufferVariant {
+ static void setOpaqueBit(renderengine::LayerSettings& layer) {
+ layer.source.buffer.isOpaque = true;
+ }
+
+ static uint8_t getAlphaChannel() {
+ // The isOpaque bit will override the alpha channel, so this value can be
+ // arbitrary.
+ return 10;
+ }
+};
+
+template <typename OpaquenessVariant>
+struct BufferSourceVariant {
+ static void fillColor(renderengine::LayerSettings& layer, half r, half g, half b,
+ RenderEngineTest* fixture) {
+ sp<GraphicBuffer> buf = RenderEngineTest::allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ fixture->sRE->genTextures(1, &texName);
+ fixture->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+
+ for (int32_t j = 0; j < buf->getHeight(); j++) {
+ uint8_t* iter = pixels + (buf->getStride() * j) * 4;
+ for (int32_t i = 0; i < buf->getWidth(); i++) {
+ iter[0] = uint8_t(r * 255);
+ iter[1] = uint8_t(g * 255);
+ iter[2] = uint8_t(b * 255);
+ iter[3] = OpaquenessVariant::getAlphaChannel();
+ iter += 4;
+ }
+ }
+
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ OpaquenessVariant::setOpaqueBit(layer);
+ }
+};
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBuffer(half r, half g, half b, half a) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(layer, r, g, b, this);
+ layer.alpha = a;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedBuffer() {
+ fillBuffer<SourceVariant>(1.0f, 0.0f, 0.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillGreenBuffer() {
+ fillBuffer<SourceVariant>(0.0f, 1.0f, 0.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 0, 255, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBlueBuffer() {
+ fillBuffer<SourceVariant>(0.0f, 0.0f, 1.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 0, 0, 255, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedTransparentBuffer() {
+ fillBuffer<SourceVariant>(1.0f, 0.0f, 0.0f, .2f);
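+ // The output is alpha-weighted: 0.2 * 255 = 51 for both the red and alpha channels.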
+ expectBufferColor(fullscreenRect(), 51, 0, 0, 51);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedOffsetBuffer() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = offsetRect();
+ settings.clip = offsetRectAtZero();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = offsetRectAtZero().toFloatRect();
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferPhysicalOffset() {
+ fillRedOffsetBuffer<SourceVariant>();
+
+ expectBufferColor(Rect(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+ Rect offsetRegionLeft(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_HEIGHT);
+ Rect offsetRegionTop(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_OFFSET);
+
+ expectBufferColor(offsetRegionLeft, 0, 0, 0, 0);
+ expectBufferColor(offsetRegionTop, 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckers(uint32_t orientationFlag) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 2x2
+ settings.clip = Rect(2, 2);
+ settings.orientation = orientationFlag;
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layerOne;
+ Rect rectOne(0, 0, 1, 1);
+ layerOne.geometry.boundaries = rectOne.toFloatRect();
+ SourceVariant::fillColor(layerOne, 1.0f, 0.0f, 0.0f, this);
+ layerOne.alpha = 1.0f;
+
+ renderengine::LayerSettings layerTwo;
+ Rect rectTwo(0, 1, 1, 2);
+ layerTwo.geometry.boundaries = rectTwo.toFloatRect();
+ SourceVariant::fillColor(layerTwo, 0.0f, 1.0f, 0.0f, this);
+ layerTwo.alpha = 1.0f;
+
+ renderengine::LayerSettings layerThree;
+ Rect rectThree(1, 0, 2, 1);
+ layerThree.geometry.boundaries = rectThree.toFloatRect();
+ SourceVariant::fillColor(layerThree, 0.0f, 0.0f, 1.0f, this);
+ layerThree.alpha = 1.0f;
+
+ layers.push_back(&layerOne);
+ layers.push_back(&layerTwo);
+ layers.push_back(&layerThree);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate0() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_0);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 255, 0, 0,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 0, 255, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate90() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_90);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 255, 0,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 255, 0, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 255, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate180() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_180);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 0,
+ 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 255, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 255, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate270() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_270);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 255,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithLayerTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 2x2
+ settings.clip = Rect(2, 2);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+ // Translate one pixel diagonally
+ layer.geometry.positionTransform = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1);
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.source.solidColor = half3(1.0f, 0.0f, 0.0f);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferLayerTransform() {
+ fillBufferWithLayerTransform<SourceVariant>();
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 0, 0);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithColorTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+ SourceVariant::fillColor(layer, 0.5f, 0.25f, 0.125f, this);
+ layer.alpha = 1.0f;
+
+ // construct a fake color matrix
+ // annihilate green and blue channels
+ settings.colorTransform = mat4::scale(vec4(1, 0, 0, 1));
+ // set red channel to red + green
+ layer.colorTransform = mat4(1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
+
+ layer.alpha = 1.0f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferColorTransform() {
+ fillBufferWithColorTransform<SourceVariant>();
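+ // Expected 191 ~= (0.5 + 0.25) * 255: the layer transform folds green into red,
+ // and the display transform then zeroes the green and blue channels.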
+ expectBufferColor(fullscreenRect(), 191, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedBufferWithRoundedCorners() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ layer.geometry.roundedCornersRadius = 5.0f;
+ layer.geometry.roundedCornersCrop = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithRoundedCorners() {
+ fillRedBufferWithRoundedCorners<SourceVariant>();
+ // Corners should be ignored...
+ expectBufferColor(Rect(0, 0, 1, 1), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH - 1, 0, DEFAULT_DISPLAY_WIDTH, 1), 0, 0, 0, 0);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT - 1, 1, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH - 1, DEFAULT_DISPLAY_HEIGHT - 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+ // ...And the non-rounded portion should be red.
+ // Other pixels may be anti-aliased, so let's not check those.
+ expectBufferColor(Rect(5, 5, DEFAULT_DISPLAY_WIDTH - 5, DEFAULT_DISPLAY_HEIGHT - 5), 255, 0, 0,
+ 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferAndBlurBackground() {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.surface_flinger.supports_background_blur", value, "0");
+ if (!atoi(value)) {
+ // This device doesn't support blurs, no-op.
+ return;
+ }
+
+ auto blurRadius = 50;
+ auto center = DEFAULT_DISPLAY_WIDTH / 2;
+
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings backgroundLayer;
+ backgroundLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(backgroundLayer, 0.0f, 1.0f, 0.0f, this);
+ backgroundLayer.alpha = 1.0f;
+ layers.push_back(&backgroundLayer);
+
+ renderengine::LayerSettings leftLayer;
+ leftLayer.geometry.boundaries =
+ Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT).toFloatRect();
+ SourceVariant::fillColor(leftLayer, 1.0f, 0.0f, 0.0f, this);
+ leftLayer.alpha = 1.0f;
+ layers.push_back(&leftLayer);
+
+ renderengine::LayerSettings blurLayer;
+ blurLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ blurLayer.backgroundBlurRadius = blurRadius;
+ blurLayer.alpha = 0;
+ layers.push_back(&blurLayer);
+
+ invokeDraw(settings, layers, mBuffer);
+
+ expectBufferColor(Rect(center - 1, center - 5, center, center + 5), 150, 150, 0, 255,
+ 50 /* tolerance */);
+ expectBufferColor(Rect(center, center - 5, center + 1, center + 5), 150, 150, 0, 255,
+ 50 /* tolerance */);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::overlayCorners() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layersFirst;
+
+ renderengine::LayerSettings layerOne;
+ layerOne.geometry.boundaries =
+ FloatRect(0, 0, DEFAULT_DISPLAY_WIDTH / 3.0, DEFAULT_DISPLAY_HEIGHT / 3.0);
+ SourceVariant::fillColor(layerOne, 1.0f, 0.0f, 0.0f, this);
+ layerOne.alpha = 0.2;
+
+ layersFirst.push_back(&layerOne);
+ invokeDraw(settings, layersFirst, mBuffer);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3, DEFAULT_DISPLAY_HEIGHT / 3), 51, 0, 0, 51);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3 + 1, DEFAULT_DISPLAY_HEIGHT / 3 + 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+
+ std::vector<const renderengine::LayerSettings*> layersSecond;
+ renderengine::LayerSettings layerTwo;
+ layerTwo.geometry.boundaries =
+ FloatRect(DEFAULT_DISPLAY_WIDTH / 3.0, DEFAULT_DISPLAY_HEIGHT / 3.0,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT);
+ SourceVariant::fillColor(layerTwo, 0.0f, 1.0f, 0.0f, this);
+ layerTwo.alpha = 1.0f;
+
+ layersSecond.push_back(&layerTwo);
+ invokeDraw(settings, layersSecond, mBuffer);
+
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3, DEFAULT_DISPLAY_HEIGHT / 3), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3 + 1, DEFAULT_DISPLAY_HEIGHT / 3 + 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+}
+
+void RenderEngineTest::fillRedBufferTextureTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ // Here we allocate a checkerboard texture, but transform the texture
+ // coordinates so that only the upper-left quadrant is sampled.
+ sp<GraphicBuffer> buf = allocateSourceBuffer(2, 2);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ // Red top left, Green top right, Blue bottom left, Black bottom right
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ pixels[4] = 0;
+ pixels[5] = 255;
+ pixels[6] = 0;
+ pixels[7] = 255;
+ pixels[8] = 0;
+ pixels[9] = 0;
+ pixels[10] = 255;
+ pixels[11] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ // Transform coordinates to only be inside the red quadrant.
+ layer.source.buffer.textureTransform = mat4::scale(vec4(0.2, 0.2, 1, 1));
+ layer.alpha = 1.0f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferTextureTransform() {
+ fillRedBufferTextureTransform();
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+void RenderEngineTest::fillRedBufferWithPremultiplyAlpha() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 1x1
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ layer.source.buffer.usePremultipliedAlpha = true;
+ layer.alpha = 0.5f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferWithPremultiplyAlpha() {
+ fillRedBufferWithPremultiplyAlpha();
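+ // The source pixel (255, 0, 0, 255) modulated by layer alpha 0.5 gives ~128 in red and alpha.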
+ expectBufferColor(fullscreenRect(), 128, 0, 0, 128);
+}
+
+void RenderEngineTest::fillRedBufferWithoutPremultiplyAlpha() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 1x1
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ layer.source.buffer.usePremultipliedAlpha = false;
+ layer.alpha = 0.5f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferWithoutPremultiplyAlpha() {
+ fillRedBufferWithoutPremultiplyAlpha();
+ expectBufferColor(fullscreenRect(), 128, 0, 0, 64, 1);
+}
+
+void RenderEngineTest::clearLeftRegion() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 4x4
+ settings.clip = Rect(4, 4);
+ settings.clearRegion = Region(Rect(2, 4));
+ std::vector<const renderengine::LayerSettings*> layers;
+ // A fake layer without bounds should not render anything
+ renderengine::LayerSettings layer;
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::clearRegion() {
+ // Reuse mBuffer
+ clearLeftRegion();
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::drawShadow(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow,
+ const ubyte4& casterColor, const ubyte4& backgroundColor) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ // add background layer
+ renderengine::LayerSettings bgLayer;
+ bgLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ ColorSourceVariant::fillColor(bgLayer, backgroundColor.r / 255.0f, backgroundColor.g / 255.0f,
+ backgroundColor.b / 255.0f, this);
+ bgLayer.alpha = backgroundColor.a / 255.0f;
+ layers.push_back(&bgLayer);
+
+ // add shadow layer
+ renderengine::LayerSettings shadowLayer;
+ shadowLayer.geometry.boundaries = castingLayer.geometry.boundaries;
+ shadowLayer.alpha = castingLayer.alpha;
+ shadowLayer.shadow = shadow;
+ layers.push_back(&shadowLayer);
+
+ // add layer casting the shadow
+ renderengine::LayerSettings layer = castingLayer;
+ SourceVariant::fillColor(layer, casterColor.r / 255.0f, casterColor.g / 255.0f,
+ casterColor.b / 255.0f, this);
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+TEST_F(RenderEngineTest, drawLayers_noLayersToDraw) {
+ drawEmptyLayers();
+}
+
+TEST_F(RenderEngineTest, drawLayers_nullOutputBuffer) {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layers.push_back(&layer);
+ base::unique_fd fence;
+ status_t status = sRE->drawLayers(settings, layers, nullptr, true, base::unique_fd(), &fence);
+
+ ASSERT_EQ(BAD_VALUE, status);
+}
+
+TEST_F(RenderEngineTest, drawLayers_nullOutputFence) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ status_t status = sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), nullptr);
+ sCurrentBuffer = mBuffer;
+ ASSERT_EQ(NO_ERROR, status);
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+TEST_F(RenderEngineTest, drawLayers_doesNotCacheFramebuffer) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ status_t status = sRE->drawLayers(settings, layers, mBuffer, false, base::unique_fd(), nullptr);
+ sCurrentBuffer = mBuffer;
+ ASSERT_EQ(NO_ERROR, status);
+ ASSERT_FALSE(sRE->isFramebufferImageCachedForTesting(mBuffer->getId()));
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_colorSource) {
+ fillRedBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_colorSource) {
+ fillGreenBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_colorSource) {
+ fillBlueBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_colorSource) {
+ fillRedTransparentBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_colorSource) {
+ fillBufferPhysicalOffset<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_colorSource) {
+ fillBufferCheckersRotate0<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_colorSource) {
+ fillBufferCheckersRotate90<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_colorSource) {
+ fillBufferCheckersRotate180<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_colorSource) {
+ fillBufferCheckersRotate270<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_colorSource) {
+ fillBufferLayerTransform<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_colorSource) {
+ fillBufferColorTransform<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_colorSource) {
+ fillBufferWithRoundedCorners<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_colorSource) {
+ fillBufferAndBlurBackground<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_colorSource) {
+ overlayCorners<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_opaqueBufferSource) {
+ fillRedBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_opaqueBufferSource) {
+ fillGreenBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_opaqueBufferSource) {
+ fillBlueBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_opaqueBufferSource) {
+ fillRedTransparentBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_opaqueBufferSource) {
+ fillBufferPhysicalOffset<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_opaqueBufferSource) {
+ fillBufferCheckersRotate0<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_opaqueBufferSource) {
+ fillBufferCheckersRotate90<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_opaqueBufferSource) {
+ fillBufferCheckersRotate180<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_opaqueBufferSource) {
+ fillBufferCheckersRotate270<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_opaqueBufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_opaqueBufferSource) {
+ fillBufferColorTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_opaqueBufferSource) {
+ fillBufferWithRoundedCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_opaqueBufferSource) {
+ fillBufferAndBlurBackground<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_opaqueBufferSource) {
+ overlayCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_bufferSource) {
+ fillRedBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_bufferSource) {
+ fillGreenBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_bufferSource) {
+ fillBlueBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_bufferSource) {
+ fillRedTransparentBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_bufferSource) {
+ fillBufferPhysicalOffset<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_bufferSource) {
+ fillBufferCheckersRotate0<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_bufferSource) {
+ fillBufferCheckersRotate90<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_bufferSource) {
+ fillBufferCheckersRotate180<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_bufferSource) {
+ fillBufferCheckersRotate270<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_bufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_bufferSource) {
+ fillBufferColorTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_bufferSource) {
+ fillBufferWithRoundedCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_bufferSource) {
+ fillBufferAndBlurBackground<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_bufferSource) {
+ overlayCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferTextureTransform) {
+ fillBufferTextureTransform();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBuffer_premultipliesAlpha) {
+ fillBufferWithPremultiplyAlpha();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBuffer_withoutPremultiplyingAlpha) {
+ fillBufferWithoutPremultiplyAlpha();
+}
+
+TEST_F(RenderEngineTest, drawLayers_clearRegion) {
+ clearRegion();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillsBufferAndCachesImages) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+ uint64_t bufferId = layer.source.buffer.buffer->getId();
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->unbindExternalTextureBufferForTesting(bufferId);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+}
+
+TEST_F(RenderEngineTest, bindExternalBuffer_withNullBuffer) {
+ status_t result = sRE->bindExternalTextureBuffer(0, nullptr, nullptr);
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineTest, bindExternalBuffer_cachesImages) {
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ sRE->genTextures(1, &texName);
+ mTexNames.push_back(texName);
+
+ sRE->bindExternalTextureBuffer(texName, buf, nullptr);
+ uint64_t bufferId = buf->getId();
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->unbindExternalTextureBufferForTesting(bufferId);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+}
+
+TEST_F(RenderEngineTest, cacheExternalBuffer_withNullBuffer) {
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->cacheExternalTextureBufferForTesting(nullptr);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_TRUE(barrier->isOpen);
+ EXPECT_EQ(BAD_VALUE, barrier->result);
+}
+
+TEST_F(RenderEngineTest, cacheExternalBuffer_cachesImages) {
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint64_t bufferId = buf->getId();
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->cacheExternalTextureBufferForTesting(buf);
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ }
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ barrier = sRE->unbindExternalTextureBufferForTesting(bufferId);
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ }
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterLayerMinSize) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(1, 1);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<ColorSourceVariant>(castingLayer, settings, casterColor, backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterColorLayer) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<ColorSourceVariant>(castingLayer, settings, casterColor, backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterOpaqueBufferLayer) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<ForceOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterWithRoundedCorner) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.geometry.roundedCornersRadius = 3.0f;
+ castingLayer.geometry.roundedCornersCrop = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<ForceOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_translucentCasterWithAlpha) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 0.5f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ true /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<RelaxOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+
+ // verify only the background since the shadow will draw behind the caster
+ const float shadowInset = settings.length * -1.0f;
+ const Rect casterWithShadow =
+ Rect(casterBounds).inset(shadowInset, shadowInset, shadowInset, shadowInset);
+ const Region backgroundRegion = Region(fullscreenRect()).subtractSelf(casterWithShadow);
+ expectBufferColor(backgroundRegion, backgroundColor.r, backgroundColor.g, backgroundColor.b,
+ backgroundColor.a);
+}
+
+TEST_F(RenderEngineTest, cleanupPostRender_cleansUpOnce) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ base::unique_fd fenceOne;
+ sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), &fenceOne);
+ base::unique_fd fenceTwo;
+ sRE->drawLayers(settings, layers, mBuffer, true, std::move(fenceOne), &fenceTwo);
+
+ const int fd = fenceTwo.get();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ }
+ // Only cleanup the first time.
+ EXPECT_TRUE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+ EXPECT_FALSE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+}
+
+TEST_F(RenderEngineTest, cleanupPostRender_whenCleaningAll_replacesTextureMemory) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ base::unique_fd fence;
+ sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), &fence);
+
+ const int fd = fence.get();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ }
+
+ uint64_t bufferId = layer.source.buffer.buffer->getId();
+ uint32_t texName = layer.source.buffer.textureName;
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(bufferId, sRE->getBufferIdForTextureNameForTesting(texName));
+
+ EXPECT_TRUE(sRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL));
+
+ // Now check that our view of memory is good.
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(std::nullopt, sRE->getBufferIdForTextureNameForTesting(texName));
+ EXPECT_TRUE(sRE->isTextureNameKnownForTesting(texName));
+}
+
+} // namespace android
+
+// TODO(b/129481165): remove the #pragma below and fix conversion issues
+#pragma clang diagnostic pop // ignored "-Wconversion"
diff --git a/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp b/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp
new file mode 100644
index 0000000..97c7442
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/properties.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <renderengine/mock/RenderEngine.h>
+#include "../threaded/RenderEngineThreaded.h"
+
+namespace android {
+
+using testing::_;
+using testing::Eq;
+using testing::Mock;
+using testing::Return;
+
+struct RenderEngineThreadedTest : public ::testing::Test {
+ ~RenderEngineThreadedTest() {}
+
+ void SetUp() override {
+ mThreadedRE = renderengine::threaded::RenderEngineThreaded::create(
+ [this]() { return std::unique_ptr<renderengine::RenderEngine>(mRenderEngine); });
+ }
+
+ std::unique_ptr<renderengine::threaded::RenderEngineThreaded> mThreadedRE;
+ renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
+};
+
+TEST_F(RenderEngineThreadedTest, dump) {
+ std::string testString = "XYZ";
+ EXPECT_CALL(*mRenderEngine, dump(_));
+ mThreadedRE->dump(testString);
+}
+
+TEST_F(RenderEngineThreadedTest, primeCache) {
+ EXPECT_CALL(*mRenderEngine, primeCache());
+ mThreadedRE->primeCache();
+}
+
+TEST_F(RenderEngineThreadedTest, genTextures) {
+ uint32_t texName;
+ EXPECT_CALL(*mRenderEngine, genTextures(1, &texName));
+ mThreadedRE->genTextures(1, &texName);
+}
+
+TEST_F(RenderEngineThreadedTest, deleteTextures) {
+ uint32_t texName;
+ EXPECT_CALL(*mRenderEngine, deleteTextures(1, &texName));
+ mThreadedRE->deleteTextures(1, &texName);
+}
+
+TEST_F(RenderEngineThreadedTest, bindExternalBuffer_nullptrBuffer) {
+ EXPECT_CALL(*mRenderEngine, bindExternalTextureBuffer(0, Eq(nullptr), Eq(nullptr)))
+ .WillOnce(Return(BAD_VALUE));
+ status_t result = mThreadedRE->bindExternalTextureBuffer(0, nullptr, nullptr);
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineThreadedTest, bindExternalBuffer_withBuffer) {
+ sp<GraphicBuffer> buf = new GraphicBuffer();
+ EXPECT_CALL(*mRenderEngine, bindExternalTextureBuffer(0, buf, Eq(nullptr)))
+ .WillOnce(Return(NO_ERROR));
+ status_t result = mThreadedRE->bindExternalTextureBuffer(0, buf, nullptr);
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cacheExternalTextureBuffer_nullptr) {
+ EXPECT_CALL(*mRenderEngine, cacheExternalTextureBuffer(Eq(nullptr)));
+ mThreadedRE->cacheExternalTextureBuffer(nullptr);
+}
+
+TEST_F(RenderEngineThreadedTest, cacheExternalTextureBuffer_withBuffer) {
+ sp<GraphicBuffer> buf = new GraphicBuffer();
+ EXPECT_CALL(*mRenderEngine, cacheExternalTextureBuffer(buf));
+ mThreadedRE->cacheExternalTextureBuffer(buf);
+}
+
+TEST_F(RenderEngineThreadedTest, unbindExternalTextureBuffer) {
+ EXPECT_CALL(*mRenderEngine, unbindExternalTextureBuffer(0x0));
+ mThreadedRE->unbindExternalTextureBuffer(0x0);
+}
+
+TEST_F(RenderEngineThreadedTest, bindFrameBuffer_returnsBadValue) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, bindFrameBuffer(framebuffer.get())).WillOnce(Return(BAD_VALUE));
+ status_t result = mThreadedRE->bindFrameBuffer(framebuffer.get());
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineThreadedTest, bindFrameBuffer_returnsNoError) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, bindFrameBuffer(framebuffer.get())).WillOnce(Return(NO_ERROR));
+ status_t result = mThreadedRE->bindFrameBuffer(framebuffer.get());
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+TEST_F(RenderEngineThreadedTest, unbindFrameBuffer) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, unbindFrameBuffer(framebuffer.get()));
+ mThreadedRE->unbindFrameBuffer(framebuffer.get());
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxTextureSize_returns20) {
+ size_t size = 20;
+ EXPECT_CALL(*mRenderEngine, getMaxTextureSize()).WillOnce(Return(size));
+ size_t result = mThreadedRE->getMaxTextureSize();
+ ASSERT_EQ(size, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxTextureSize_returns0) {
+ size_t size = 0;
+ EXPECT_CALL(*mRenderEngine, getMaxTextureSize()).WillOnce(Return(size));
+ size_t result = mThreadedRE->getMaxTextureSize();
+ ASSERT_EQ(size, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxViewportDims_returns20) {
+ size_t dims = 20;
+ EXPECT_CALL(*mRenderEngine, getMaxViewportDims()).WillOnce(Return(dims));
+ size_t result = mThreadedRE->getMaxViewportDims();
+ ASSERT_EQ(dims, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxViewportDims_returns0) {
+ size_t dims = 0;
+ EXPECT_CALL(*mRenderEngine, getMaxViewportDims()).WillOnce(Return(dims));
+ size_t result = mThreadedRE->getMaxViewportDims();
+ ASSERT_EQ(dims, result);
+}
+
+TEST_F(RenderEngineThreadedTest, isProtected_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(false));
+ status_t result = mThreadedRE->isProtected();
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, isProtected_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(true));
+ size_t result = mThreadedRE->isProtected();
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, supportsProtectedContent_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(false));
+ status_t result = mThreadedRE->supportsProtectedContent();
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, supportsProtectedContent_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(true));
+ status_t result = mThreadedRE->supportsProtectedContent();
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, useProtectedContext_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, useProtectedContext(false)).WillOnce(Return(false));
+ status_t result = mThreadedRE->useProtectedContext(false);
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, useProtectedContext_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, useProtectedContext(false)).WillOnce(Return(true));
+ status_t result = mThreadedRE->useProtectedContext(false);
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cleanupPostRender_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine,
+ cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL))
+ .WillOnce(Return(false));
+ status_t result =
+ mThreadedRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cleanupPostRender_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine,
+ cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL))
+ .WillOnce(Return(true));
+ status_t result =
+ mThreadedRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, drawLayers) {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ sp<GraphicBuffer> buffer = new GraphicBuffer();
+ base::unique_fd bufferFence;
+ base::unique_fd drawFence;
+
+ EXPECT_CALL(*mRenderEngine, drawLayers)
+ .WillOnce([](const renderengine::DisplaySettings&,
+ const std::vector<const renderengine::LayerSettings*>&,
+ const sp<GraphicBuffer>&, const bool, base::unique_fd&&,
+ base::unique_fd*) -> status_t { return NO_ERROR; });
+
+ status_t result = mThreadedRE->drawLayers(settings, layers, buffer, false,
+ std::move(bufferFence), &drawFence);
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+} // namespace android
diff --git a/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp
new file mode 100644
index 0000000..d4184fd
--- /dev/null
+++ b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "RenderEngineThreaded.h"
+
+#include <sched.h>
+#include <chrono>
+#include <future>
+
+#include <android-base/stringprintf.h>
+#include <private/gui/SyncFeatures.h>
+#include <utils/Trace.h>
+
+#include "gl/GLESRenderEngine.h"
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace renderengine {
+namespace threaded {
+
+std::unique_ptr<RenderEngineThreaded> RenderEngineThreaded::create(CreateInstanceFactory factory) {
+ return std::make_unique<RenderEngineThreaded>(std::move(factory));
+}
+
+RenderEngineThreaded::RenderEngineThreaded(CreateInstanceFactory factory) {
+ ATRACE_CALL();
+
+ std::lock_guard lockThread(mThreadMutex);
+ mThread = std::thread(&RenderEngineThreaded::threadMain, this, factory);
+}
+
+RenderEngineThreaded::~RenderEngineThreaded() {
+ {
+ std::lock_guard lock(mThreadMutex);
+ mRunning = false;
+ mCondition.notify_one();
+ }
+
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+// NO_THREAD_SAFETY_ANALYSIS is because std::unique_lock presently lacks thread safety annotations.
+void RenderEngineThreaded::threadMain(CreateInstanceFactory factory) NO_THREAD_SAFETY_ANALYSIS {
+ ATRACE_CALL();
+
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (sched_setscheduler(0, SCHED_FIFO, &param) != 0) {
+ ALOGE("Couldn't set SCHED_FIFO");
+ }
+
+ mRenderEngine = factory();
+
+ std::unique_lock<std::mutex> lock(mThreadMutex);
+ pthread_setname_np(pthread_self(), mThreadName);
+
+ while (mRunning) {
+ if (!mFunctionCalls.empty()) {
+ auto task = mFunctionCalls.front();
+ mFunctionCalls.pop();
+ task(*mRenderEngine);
+ }
+ mCondition.wait(lock, [this]() REQUIRES(mThreadMutex) {
+ return !mRunning || !mFunctionCalls.empty();
+ });
+ }
+}
+
+void RenderEngineThreaded::primeCache() const {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::primeCache");
+ instance.primeCache();
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::dump(std::string& result) {
+ std::promise<std::string> resultPromise;
+ std::future<std::string> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &result](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::dump");
+ std::string localResult = result;
+ instance.dump(localResult);
+ resultPromise.set_value(std::move(localResult));
+ });
+ }
+ mCondition.notify_one();
+ // Note: This is an rvalue.
+ result.assign(resultFuture.get());
+}
+
+bool RenderEngineThreaded::useNativeFenceSync() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& /*instance*/) {
+ ATRACE_NAME("REThreaded::useNativeFenceSync");
+ bool returnValue = SyncFeatures::getInstance().useNativeFenceSync();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::useWaitSync() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& /*instance*/) {
+ ATRACE_NAME("REThreaded::useWaitSync");
+ bool returnValue = SyncFeatures::getInstance().useWaitSync();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::genTextures(size_t count, uint32_t* names) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, count, names](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::genTextures");
+ instance.genTextures(count, names);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::deleteTextures(size_t count, uint32_t const* names) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, count, &names](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::deleteTextures");
+ instance.deleteTextures(count, names);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::bindExternalTextureImage(uint32_t texName, const Image& image) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, texName, &image](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindExternalTextureImage");
+ instance.bindExternalTextureImage(texName, image);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+status_t RenderEngineThreaded::bindExternalTextureBuffer(uint32_t texName,
+ const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, texName, &buffer, &fence](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindExternalTextureBuffer");
+ status_t status = instance.bindExternalTextureBuffer(texName, buffer, fence);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &buffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::cacheExternalTextureBuffer");
+ instance.cacheExternalTextureBuffer(buffer);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::unbindExternalTextureBuffer(uint64_t bufferId) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &bufferId](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::unbindExternalTextureBuffer");
+ instance.unbindExternalTextureBuffer(bufferId);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+status_t RenderEngineThreaded::bindFrameBuffer(Framebuffer* framebuffer) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &framebuffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindFrameBuffer");
+ status_t status = instance.bindFrameBuffer(framebuffer);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::unbindFrameBuffer(Framebuffer* framebuffer) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &framebuffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::unbindFrameBuffer");
+ instance.unbindFrameBuffer(framebuffer);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+size_t RenderEngineThreaded::getMaxTextureSize() const {
+ std::promise<size_t> resultPromise;
+ std::future<size_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getMaxTextureSize");
+ size_t size = instance.getMaxTextureSize();
+ resultPromise.set_value(size);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+size_t RenderEngineThreaded::getMaxViewportDims() const {
+ std::promise<size_t> resultPromise;
+ std::future<size_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getMaxViewportDims");
+ size_t size = instance.getMaxViewportDims();
+ resultPromise.set_value(size);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::isProtected() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::isProtected");
+ bool returnValue = instance.isProtected();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::supportsProtectedContent() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::supportsProtectedContent");
+ bool returnValue = instance.supportsProtectedContent();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::useProtectedContext(bool useProtectedContext) {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, useProtectedContext](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::useProtectedContext");
+ bool returnValue = instance.useProtectedContext(useProtectedContext);
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+Framebuffer* RenderEngineThreaded::getFramebufferForDrawing() {
+ std::promise<Framebuffer*> resultPromise;
+ std::future<Framebuffer*> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getFramebufferForDrawing");
+ Framebuffer* framebuffer = instance.getFramebufferForDrawing();
+ resultPromise.set_value(framebuffer);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::cleanupPostRender(CleanupMode mode) {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, mode](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::cleanupPostRender");
+ bool returnValue = instance.cleanupPostRender(mode);
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+status_t RenderEngineThreaded::drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer,
+ const bool useFramebufferCache,
+ base::unique_fd&& bufferFence,
+ base::unique_fd* drawFence) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &display, &layers, &buffer, useFramebufferCache,
+ &bufferFence, &drawFence](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::drawLayers");
+ status_t status = instance.drawLayers(display, layers, buffer, useFramebufferCache,
+ std::move(bufferFence), drawFence);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+} // namespace threaded
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h
new file mode 100644
index 0000000..86a49e9
--- /dev/null
+++ b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "renderengine/RenderEngine.h"
+
+namespace android {
+namespace renderengine {
+namespace threaded {
+
+using CreateInstanceFactory = std::function<std::unique_ptr<renderengine::RenderEngine>()>;
+
+/**
+ * This class wraps a basic RenderEngine implementation and owns a dedicated worker thread. Each
+ * call into this class is captured as a lambda and pushed onto a queue; the worker thread then
+ * executes the queued functions in order.
+ */
+class RenderEngineThreaded : public RenderEngine {
+public:
+ static std::unique_ptr<RenderEngineThreaded> create(CreateInstanceFactory factory);
+
+ RenderEngineThreaded(CreateInstanceFactory factory);
+ ~RenderEngineThreaded() override;
+ void primeCache() const override;
+
+ void dump(std::string& result) override;
+
+ bool useNativeFenceSync() const override;
+ bool useWaitSync() const override;
+ void genTextures(size_t count, uint32_t* names) override;
+ void deleteTextures(size_t count, uint32_t const* names) override;
+ void bindExternalTextureImage(uint32_t texName, const Image& image) override;
+ status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) override;
+ void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) override;
+ void unbindExternalTextureBuffer(uint64_t bufferId) override;
+ status_t bindFrameBuffer(Framebuffer* framebuffer) override;
+ void unbindFrameBuffer(Framebuffer* framebuffer) override;
+ size_t getMaxTextureSize() const override;
+ size_t getMaxViewportDims() const override;
+
+ bool isProtected() const override;
+ bool supportsProtectedContent() const override;
+ bool useProtectedContext(bool useProtectedContext) override;
+ bool cleanupPostRender(CleanupMode mode) override;
+
+ status_t drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
+ base::unique_fd&& bufferFence, base::unique_fd* drawFence) override;
+
+protected:
+ Framebuffer* getFramebufferForDrawing() override;
+
+private:
+ void threadMain(CreateInstanceFactory factory);
+
+ /* ------------------------------------------------------------------------
+ * Threading
+ */
+ const char* const mThreadName = "RenderEngineThread";
+ // Protects the creation and destruction of mThread.
+ mutable std::mutex mThreadMutex;
+ std::thread mThread GUARDED_BY(mThreadMutex);
+ bool mRunning GUARDED_BY(mThreadMutex) = true;
+ mutable std::queue<std::function<void(renderengine::RenderEngine& instance)>> mFunctionCalls
+ GUARDED_BY(mThreadMutex);
+ mutable std::condition_variable mCondition;
+
+ /* ------------------------------------------------------------------------
+ * Render Engine
+ */
+ std::unique_ptr<renderengine::RenderEngine> mRenderEngine;
+};
+} // namespace threaded
+} // namespace renderengine
+} // namespace android
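
Every public entry point in RenderEngineThreaded above follows the same round trip: the call is captured as a lambda, pushed onto mFunctionCalls under mThreadMutex, the worker is woken through mCondition, and the caller blocks on a std::future until the lambda fulfils the matching std::promise on the render-engine thread. The sketch below is a minimal standalone model of that dispatch pattern, not the real class: it omits the RenderEngine instance, tracing, and SCHED_FIFO setup, and all names in it are illustrative.

    #include <condition_variable>
    #include <functional>
    #include <future>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Simplified model of the queue-of-lambdas dispatch used by RenderEngineThreaded.
    class SerialExecutor {
    public:
        SerialExecutor() : mThread([this] { threadMain(); }) {}

        ~SerialExecutor() {
            {
                std::lock_guard<std::mutex> lock(mMutex);
                mRunning = false;
            }
            mCondition.notify_one();
            mThread.join();
        }

        // Queue a task and block until the worker thread has produced its result,
        // mirroring the promise/future round trip in each RenderEngineThreaded method.
        int runBlocking(std::function<int()> work) {
            std::promise<int> resultPromise;
            std::future<int> resultFuture = resultPromise.get_future();
            {
                std::lock_guard<std::mutex> lock(mMutex);
                mTasks.push([&resultPromise, work] { resultPromise.set_value(work()); });
            }
            mCondition.notify_one();
            return resultFuture.get();
        }

    private:
        void threadMain() {
            std::unique_lock<std::mutex> lock(mMutex);
            while (mRunning || !mTasks.empty()) {
                mCondition.wait(lock, [this] { return !mRunning || !mTasks.empty(); });
                while (!mTasks.empty()) {
                    auto task = std::move(mTasks.front());
                    mTasks.pop();
                    task();  // the promise is fulfilled on the worker thread
                }
            }
        }

        std::mutex mMutex;
        std::condition_variable mCondition;
        std::queue<std::function<void()>> mTasks;
        bool mRunning = true;
        std::thread mThread;
    };

Calling runBlocking from several threads serializes the work in queue order, which is what lets the real class expose a synchronous RenderEngine API while funnelling all GL work through a single thread.
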
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 7a6b959..3762553 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "HEVCUtilsUnitTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/timedtext/TEST_MAPPING b/media/libstagefright/timedtext/TEST_MAPPING
index 185f824..35a5b11 100644
--- a/media/libstagefright/timedtext/TEST_MAPPING
+++ b/media/libstagefright/timedtext/TEST_MAPPING
@@ -1,7 +1,9 @@
// mappings for frameworks/av/media/libstagefright/timedtext
{
- "presubmit": [
- // TODO(b/148094059): unit tests not allowed to download content
- //{ "name": "TimedTextUnitTest" }
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "TimedTextUnitTest" }
]
}
diff --git a/media/libstagefright/timedtext/test/Android.bp b/media/libstagefright/timedtext/test/Android.bp
index 36f8891..11e5077 100644
--- a/media/libstagefright/timedtext/test/Android.bp
+++ b/media/libstagefright/timedtext/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "TimedTextUnitTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/mtp/Android.bp b/media/mtp/Android.bp
index 66a3139..e572249 100644
--- a/media/mtp/Android.bp
+++ b/media/mtp/Android.bp
@@ -52,5 +52,6 @@
"liblog",
"libusbhost",
],
+ header_libs: ["libcutils_headers"],
}
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index af21a99..d771095 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -45,6 +45,10 @@
return AMEDIA_OK;
} else if (err == -EAGAIN) {
return (media_status_t) AMEDIACODEC_INFO_TRY_AGAIN_LATER;
+ } else if (err == NO_MEMORY) {
+ return AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+ } else if (err == DEAD_OBJECT) {
+ return AMEDIACODEC_ERROR_RECLAIMED;
}
ALOGE("sf error code: %d", err);
return AMEDIA_ERROR_UNKNOWN;
@@ -255,7 +259,7 @@
break;
}
msg->findString("detail", &detail);
- ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ ALOGE("Codec reported error(0x%x), actionCode(%d), detail(%s)",
err, actionCode, detail.c_str());
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 8680641..73c52a9 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -364,6 +364,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_SAR_WIDTH = "sar-width";
EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
+EXPORT const char* AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS = "slow-motion-markers";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
EXPORT const char* AMEDIAFORMAT_KEY_TARGET_TIME = "target-time";
EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT = "temporal-layer-count";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 6371de4..394b972 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -322,6 +322,10 @@
extern const char* AMEDIAFORMAT_KEY_LOW_LATENCY __INTRODUCED_IN(30);
#endif /* __ANDROID_API__ >= 30 */
+#if __ANDROID_API__ >= 31
+extern const char* AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS __INTRODUCED_IN(31);
+#endif /* __ANDROID_API__ >= 31 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 29f1da8..bd3337e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -131,6 +131,7 @@
AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
AMEDIAFORMAT_KEY_SEI; # var introduced=28
AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS; # var introduced=31
AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
AMEDIAFORMAT_KEY_TARGET_TIME; # var introduced=29
AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT; # var introduced=29
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 113e4a7..19225d3 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -27,6 +27,9 @@
namespace android {
+static constexpr int32_t INVALID_ADJ = -10000;
+static constexpr int32_t NATIVE_ADJ = -1000;
+
ProcessInfo::ProcessInfo() {}
bool ProcessInfo::getPriority(int pid, int* priority) {
@@ -35,8 +38,6 @@
size_t length = 1;
int32_t state;
- static const int32_t INVALID_ADJ = -10000;
- static const int32_t NATIVE_ADJ = -1000;
int32_t score = INVALID_ADJ;
status_t err = service->getProcessStatesAndOomScoresFromPids(length, &pid, &state, &score);
if (err != OK) {
@@ -45,8 +46,17 @@
}
ALOGV("pid %d state %d score %d", pid, state, score);
if (score <= NATIVE_ADJ) {
- ALOGE("pid %d invalid OOM adjustments value %d", pid, score);
- return false;
+ std::scoped_lock lock{mOverrideLock};
+
+ // If this process is not tracked by ActivityManagerService, look for overrides.
+ auto it = mOverrideMap.find(pid);
+ if (it != mOverrideMap.end()) {
+ ALOGI("pid %d invalid OOM score %d, override to %d", pid, score, it->second.oomScore);
+ score = it->second.oomScore;
+ } else {
+ ALOGE("pid %d invalid OOM score %d", pid, score);
+ return false;
+ }
}
// Use OOM adjustments value as the priority. Lower the value, higher the priority.
@@ -61,6 +71,26 @@
return (callingPid == getpid()) || (callingPid == pid) || (callingUid == AID_MEDIA);
}
+bool ProcessInfo::overrideProcessInfo(int pid, int procState, int oomScore) {
+ std::scoped_lock lock{mOverrideLock};
+
+ mOverrideMap.erase(pid);
+
+ // Disable the override if oomScore is set to NATIVE_ADJ or below.
+ if (oomScore <= NATIVE_ADJ) {
+ return false;
+ }
+
+ mOverrideMap.emplace(pid, ProcessInfoOverride{procState, oomScore});
+ return true;
+}
+
+void ProcessInfo::removeProcessInfoOverride(int pid) {
+ std::scoped_lock lock{mOverrideLock};
+
+ mOverrideMap.erase(pid);
+}
+
ProcessInfo::~ProcessInfo() {}
} // namespace android
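
getPriority() above now treats any OOM score at or below NATIVE_ADJ as "not tracked by ActivityManagerService" and falls back to a per-pid entry registered through overrideProcessInfo(). The following is a simplified, self-contained model of that fallback, with a plain std::map and free functions standing in for the real ProcessInfo class; the constant and field names only mirror the diff.

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <mutex>

    namespace {

    constexpr int32_t kNativeAdj = -1000;  // scores at or below this are not AMS-tracked

    struct Override { int procState; int oomScore; };
    std::mutex gLock;
    std::map<int, Override> gOverrides;

    // Register an override; rejected if the requested score would itself be "invalid".
    bool overrideProcessInfo(int pid, int procState, int oomScore) {
        std::scoped_lock lock{gLock};
        gOverrides.erase(pid);
        if (oomScore <= kNativeAdj) return false;
        gOverrides.emplace(pid, Override{procState, oomScore});
        return true;
    }

    // Use the AMS score when valid, otherwise fall back to a registered override.
    bool getPriority(int pid, int amsScore, int* priority) {
        if (amsScore <= kNativeAdj) {
            std::scoped_lock lock{gLock};
            auto it = gOverrides.find(pid);
            if (it == gOverrides.end()) return false;  // untracked and no override
            amsScore = it->second.oomScore;
        }
        *priority = amsScore;  // lower OOM score means higher priority
        return true;
    }

    }  // namespace

    int main() {
        int prio = 0;
        // A native process (AMS reports -1000) fails until an override is registered.
        std::printf("before override: %d\n", getPriority(1234, -1000, &prio));
        overrideProcessInfo(1234, /*procState=*/0, /*oomScore=*/100);
        std::printf("after override: %d prio=%d\n", getPriority(1234, -1000, &prio), prio);
        return 0;
    }
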
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 34bdac5..fe45221 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -855,7 +855,8 @@
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
callingPid, input.clientInfo.clientTid, clientUid,
- &lStatus, portId, input.audioTrackCallback);
+ &lStatus, portId, input.audioTrackCallback,
+ input.opPackageName);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
@@ -2071,8 +2072,8 @@
Mutex::Autolock _l(mLock);
RecordThread *thread = checkRecordThread_l(output.inputId);
if (thread == NULL) {
- ALOGE("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
- lStatus = BAD_VALUE;
+ ALOGW("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
+ lStatus = FAILED_TRANSACTION;
goto Exit;
}
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index d05c8b8..a4b8650 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -26,10 +26,11 @@
bool hasOpPlayAudio() const;
static sp<OpPlayAudioMonitor> createIfNeeded(
- uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType);
+ uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType,
+ const std::string& opPackageName);
private:
- OpPlayAudioMonitor(uid_t uid, audio_usage_t usage, int id);
+ OpPlayAudioMonitor(uid_t uid, audio_usage_t usage, int id, const String16& opPackageName);
void onFirstRef() override;
static void getPackagesForUid(uid_t uid, Vector<String16>& packages);
@@ -49,10 +50,10 @@
void checkPlayAudioForUsage();
std::atomic_bool mHasOpPlayAudio;
- Vector<String16> mPackages;
const uid_t mUid;
const int32_t mUsage; // on purpose not audio_usage_t because always checked in appOps as int32_t
const int mId; // for logging purposes only
+ const String16 mOpPackageName;
};
// playback track
@@ -77,7 +78,8 @@
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
/** default behaviour is to start when there are as many frames
* ready as possible (aka. Buffer is full). */
- size_t frameCountToBeReady = SIZE_MAX);
+ size_t frameCountToBeReady = SIZE_MAX,
+ const std::string opPackageName = "");
virtual ~Track();
virtual status_t initCheck() const;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index affc09e..1e0bcac 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2080,7 +2080,8 @@
uid_t uid,
status_t *status,
audio_port_handle_t portId,
- const sp<media::IAudioTrackCallback>& callback)
+ const sp<media::IAudioTrackCallback>& callback,
+ const std::string& opPackageName)
{
size_t frameCount = *pFrameCount;
size_t notificationFrameCount = *pNotificationFrameCount;
@@ -2371,7 +2372,8 @@
track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
- sessionId, creatorPid, uid, trackFlags, TrackBase::TYPE_DEFAULT, portId);
+ sessionId, creatorPid, uid, trackFlags, TrackBase::TYPE_DEFAULT, portId,
+ SIZE_MAX /*frameCountToBeReady*/, opPackageName);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
@@ -2383,7 +2385,7 @@
{
Mutex::Autolock _atCbL(mAudioTrackCbLock);
if (callback.get() != nullptr) {
- mAudioTrackCallbacks.emplace(callback);
+ mAudioTrackCallbacks.emplace(track, callback);
}
}
@@ -2617,6 +2619,10 @@
mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string());
mTracks.remove(track);
+ {
+ Mutex::Autolock _atCbL(mAudioTrackCbLock);
+ mAudioTrackCallbacks.erase(track);
+ }
if (track->isFastTrack()) {
int index = track->mFastIndex;
ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
@@ -2712,8 +2718,8 @@
audio_utils::metadata::byteStringFromData(metadata);
std::vector metadataVec(metaDataStr.begin(), metaDataStr.end());
Mutex::Autolock _l(mAudioTrackCbLock);
- for (const auto& callback : mAudioTrackCallbacks) {
- callback->onCodecFormatChanged(metadataVec);
+ for (const auto& callbackPair : mAudioTrackCallbacks) {
+ callbackPair.second->onCodecFormatChanged(metadataVec);
}
}).detach();
}
@@ -7911,7 +7917,8 @@
AutoMutex lock(mLock);
if (recordTrack->isInvalid()) {
recordTrack->clearSyncStartEvent();
- return INVALID_OPERATION;
+ ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
+ return DEAD_OBJECT;
}
if (mActiveTracks.indexOf(recordTrack) >= 0) {
if (recordTrack->mState == TrackBase::PAUSING) {
@@ -7941,7 +7948,8 @@
recordTrack->mState = TrackBase::STARTING_2;
// STARTING_2 forces destroy to call stopInput.
}
- return INVALID_OPERATION;
+ ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
+ return DEAD_OBJECT;
}
if (recordTrack->mState != TrackBase::STARTING_1) {
ALOGW("%s(%d): unsynchronized mState:%d change",
@@ -8773,7 +8781,6 @@
AudioFlinger::MmapThread::~MmapThread()
{
- releaseWakeLock_l();
}
void AudioFlinger::MmapThread::onFirstRef()
@@ -8823,7 +8830,6 @@
return NO_INIT;
}
mStandby = true;
- acquireWakeLock();
return mHalStream->createMmapBuffer(minSizeFrames, info);
}
@@ -8862,8 +8868,12 @@
status_t ret;
if (*handle == mPortId) {
- // for the first track, reuse portId and session allocated when the stream was opened
- return exitStandby();
+ // For the first track, reuse portId and session allocated when the stream was opened.
+ ret = exitStandby();
+ if (ret == NO_ERROR) {
+ acquireWakeLock();
+ }
+ return ret;
}
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -8984,6 +8994,7 @@
if (handle == mPortId) {
mHalStream->stop();
+ releaseWakeLock();
return NO_ERROR;
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index ac41e82..7db7c86 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -884,7 +884,8 @@
uid_t uid,
status_t *status /*non-NULL*/,
audio_port_handle_t portId,
- const sp<media::IAudioTrackCallback>& callback);
+ const sp<media::IAudioTrackCallback>& callback,
+ const std::string& opPackageName);
AudioStreamOut* getOutput() const;
AudioStreamOut* clearOutput();
@@ -1218,7 +1219,7 @@
Mutex mAudioTrackCbLock;
// Record of IAudioTrackCallback
- std::set<sp<media::IAudioTrackCallback>> mAudioTrackCallbacks;
+ std::map<sp<Track>, sp<media::IAudioTrackCallback>> mAudioTrackCallbacks;
private:
// The HAL output sink is treated as non-blocking, but current implementation is blocking
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c92bce5..fbfe077 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -386,11 +386,12 @@
// static
sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
- uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType)
+ uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType,
+ const std::string& opPackageName)
{
+ Vector <String16> packages;
+ getPackagesForUid(uid, packages);
if (isServiceUid(uid)) {
- Vector <String16> packages;
- getPackagesForUid(uid, packages);
if (packages.isEmpty()) {
ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
id,
@@ -410,12 +411,32 @@
id, attr.flags);
return nullptr;
}
- return new OpPlayAudioMonitor(uid, attr.usage, id);
+
+ String16 opPackageNameStr(opPackageName.c_str());
+ if (opPackageName.empty()) {
+ // If no package name is provided by the client, use the first associated with the uid
+ if (!packages.isEmpty()) {
+ opPackageNameStr = packages[0];
+ }
+ } else {
+ // If the provided package name is invalid, we force app ops denial by clearing the package
+ // name passed to OpPlayAudioMonitor
+ if (std::find_if(packages.begin(), packages.end(),
+ [&opPackageNameStr](const auto& package) {
+ return opPackageNameStr == package; }) == packages.end()) {
+ ALOGW("The package name(%s) provided does not correspond to the uid %d, "
+ "force muting the track", opPackageName.c_str(), uid);
+ // Set package name as an empty string so that hasOpPlayAudio will always return false.
+ opPackageNameStr = String16("");
+ }
+ }
+ return new OpPlayAudioMonitor(uid, attr.usage, id, opPackageNameStr);
}
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
- uid_t uid, audio_usage_t usage, int id)
- : mHasOpPlayAudio(true), mUid(uid), mUsage((int32_t) usage), mId(id)
+ uid_t uid, audio_usage_t usage, int id, const String16& opPackageName)
+ : mHasOpPlayAudio(true), mUid(uid), mUsage((int32_t) usage), mId(id),
+ mOpPackageName(opPackageName)
{
}
@@ -429,11 +450,10 @@
void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
{
- getPackagesForUid(mUid, mPackages);
checkPlayAudioForUsage();
- if (!mPackages.isEmpty()) {
+ if (mOpPackageName.size() != 0) {
mOpCallback = new PlayAudioOpCallback(this);
- mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mPackages[0], mOpCallback);
+ mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mOpPackageName, mOpCallback);
}
}
@@ -446,18 +466,11 @@
// - not called from PlayAudioOpCallback because the callback is not installed in this case
void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
{
- if (mPackages.isEmpty()) {
+ if (mOpPackageName.size() == 0) {
mHasOpPlayAudio.store(false);
} else {
- bool hasIt = true;
- for (const String16& packageName : mPackages) {
- const int32_t mode = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
- mUsage, mUid, packageName);
- if (mode != AppOpsManager::MODE_ALLOWED) {
- hasIt = false;
- break;
- }
- }
+ bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
+ mUsage, mUid, mOpPackageName) == AppOpsManager::MODE_ALLOWED;
ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
mHasOpPlayAudio.store(hasIt);
}
@@ -511,7 +524,8 @@
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId,
- size_t frameCountToBeReady)
+ size_t frameCountToBeReady,
+ const std::string opPackageName)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
@@ -534,7 +548,8 @@
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
mVolumeHandler(new media::VolumeHandler(sampleRate)),
- mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(uid, attr, id(), streamType)),
+ mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(
+ uid, attr, id(), streamType, opPackageName)),
// mSinkTimestamp
mFrameCountToBeReady(frameCountToBeReady),
mFastIndex(-1),
@@ -601,7 +616,7 @@
// external vibration is always created for all tracks attached to haptic playback thread.
mAudioVibrationController = new AudioVibrationController(this);
mExternalVibration = new os::ExternalVibration(
- mUid, "" /* pkg */, mAttr, mAudioVibrationController);
+ mUid, opPackageName, mAttr, mAudioVibrationController);
}
// Once this item is logged by the server, the client can add properties.
@@ -2229,7 +2244,8 @@
RecordThread *recordThread = (RecordThread *)thread.get();
return recordThread->start(this, event, triggerSession);
} else {
- return BAD_VALUE;
+ ALOGW("%s track %d: thread was destroyed", __func__, portId());
+ return DEAD_OBJECT;
}
}
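
A minimal standalone sketch of the package-name rule applied above in createIfNeeded(): trust the client-supplied name only if it belongs to the calling uid, otherwise blank it so the app-ops check denies OP_PLAY_AUDIO. The helper name and plain std:: types are illustrative, not part of the change.

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical helper mirroring the selection logic in createIfNeeded().
    std::string resolveOpPackageName(const std::string& requested,
                                     const std::vector<std::string>& uidPackages) {
        if (requested.empty()) {
            // No name supplied: fall back to the first package owned by the uid, if any.
            return uidPackages.empty() ? std::string() : uidPackages.front();
        }
        // A supplied name must belong to the uid; otherwise return "" to force muting.
        const bool valid = std::find(uidPackages.begin(), uidPackages.end(), requested)
                != uidPackages.end();
        return valid ? requested : std::string();
    }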
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 0f3ed14..93819f5 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -306,6 +306,25 @@
virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
device_role_t role,
AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices) = 0;
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 923310c..80afe9d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -113,6 +113,9 @@
const sp<AudioPolicyMix> getPrimaryMix() const {
return mPrimaryMix.promote();
};
+ bool hasLostPrimaryMix() const {
+ return mPrimaryMix.unsafe_get() && !mPrimaryMix.promote();
+ }
void setActive(bool active) override
{
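
hasLostPrimaryMix() above distinguishes "never had a mix" from "had a mix that has since been destroyed" by combining wp<>::unsafe_get() with promote(). A small sketch of that weak-pointer pattern, assuming libutils RefBase/wp semantics:

    #include <utils/RefBase.h>

    struct Mix : public android::RefBase {};

    // True only if the weak reference was set at some point but the referent
    // has since been released, so promote() can no longer produce a strong ref.
    bool hasLost(const android::wp<Mix>& weakMix) {
        return weakMix.unsafe_get() != nullptr && weakMix.promote() == nullptr;
    }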
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index b28381b..dcdc035 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -91,7 +91,7 @@
<!-- Output devices declaration, i.e. Sink DEVICE PORT -->
<devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
diff --git a/services/audiopolicy/config/audio_policy_configuration_7_0.xml b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
index 6087bf2..a9ecff3 100644
--- a/services/audiopolicy/config/audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
@@ -91,7 +91,7 @@
<!-- Output devices declaration, i.e. Sink DEVICE PORT -->
<devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
old mode 100755
new mode 100644
index 804a802..4510f63
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -127,11 +127,36 @@
status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+ status_t setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) override;
+
+ status_t addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) override;
+
+ /**
+ * Remove devices role for capture preset. When `forceMatched` is true, all of the devices to
+ * be removed must currently be set as the role for the capture preset. Otherwise, only devices
+ * that are currently set as the role for the capture preset will be removed.
+ */
+ status_t doRemoveDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices,
+ bool forceMatched=true);
+
+ status_t removeDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices) override;
+
+ status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) override;
+
+ status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const override;
+
private:
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
ProductStrategyPreferredRoutingMap mProductStrategyPreferredDevices;
+ CapturePresetDevicesRoleMap mCapturePresetDevicesRole;
VolumeGroupMap mVolumeGroups;
LastRemovableMediaDevices mLastRemovableMediaDevices;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
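
mCapturePresetDevicesRole is a nested map (capture preset -> device role -> device list); the CapturePresetDevicesRoleMap alias is introduced in EngineInterface.h further below. A toy sketch of how entries are stored and erased, with ints and strings standing in for the audio types:

    #include <map>
    #include <string>
    #include <vector>

    using Preset = int;    // stand-in for audio_source_t
    using Role = int;      // stand-in for device_role_t
    using Devices = std::vector<std::string>;  // stand-in for AudioDeviceTypeAddrVector

    int main() {
        std::map<Preset, std::map<Role, Devices>> rolesByPreset;

        // set...ForCapturePreset(): replace whatever was previously set for this role.
        rolesByPreset[1][1] = {"builtin_mic"};

        // clear...ForCapturePreset(): erase the role entry entirely so a later
        // lookup reports "not found" rather than an empty list.
        rolesByPreset[1].erase(1);
        return static_cast<int>(rolesByPreset[1].count(1));  // 0: role no longer present
    }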
diff --git a/services/audiopolicy/engine/common/include/LastRemovableMediaDevices.h b/services/audiopolicy/engine/common/include/LastRemovableMediaDevices.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index ae4f7f4..1875c10 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -19,6 +19,7 @@
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
+#include "../include/EngineBase.h"
#include <TypeConverter.h>
namespace android {
@@ -423,6 +424,171 @@
return NO_ERROR;
}
+status_t EngineBase::setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ mCapturePresetDevicesRole[audioSource][role] = devices;
+ // When the devices are set as preferred devices, remove them from the disabled devices.
+ doRemoveDevicesRoleForCapturePreset(
+ audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support setting devices role as disabled for capture preset.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall through as there is no need to set the device role to none
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ mCapturePresetDevicesRole[audioSource][role] = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRole[audioSource][role], devices);
+ for (const auto& device : devices) {
+ mCapturePresetDevicesRole[audioSource][role].push_back(device);
+ }
+ // When the devices are set as preferred devices, remove them from the disabled devices.
+ doRemoveDevicesRoleForCapturePreset(
+ audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support setting devices role as disabled for capture preset.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall through as there is no need to set the device role to none
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices) {
+ return doRemoveDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t EngineBase::doRemoveDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices, bool forceMatched)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
+ mCapturePresetDevicesRole[audioSource].count(role) == 0) {
+ return NAME_NOT_FOUND;
+ }
+ AudioDeviceTypeAddrVector remainingDevices = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRole[audioSource][role], devices);
+ if (forceMatched && remainingDevices.size() !=
+ mCapturePresetDevicesRole[audioSource][role].size() - devices.size()) {
+ // Some of the devices to be removed are not present in the cached record
+ return BAD_VALUE;
+ }
+ mCapturePresetDevicesRole[audioSource][role] = remainingDevices;
+ if (mCapturePresetDevicesRole[audioSource][role].empty()) {
+ // Remove the role when device list is empty
+ mCapturePresetDevicesRole[audioSource].erase(role);
+ }
+ } break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as it makes no sense to remove devices with
+ // role as DEVICE_ROLE_NONE
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
+ mCapturePresetDevicesRole[audioSource].erase(role) == 0) {
+ // no preferred device for the given audio source
+ return NAME_NOT_FOUND;
+ }
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support clearing devices role as disabled for capture preset.
+ ALOGI("%s not implemented for role %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as it makes no sense to remove devices with
+ // role as DEVICE_ROLE_NONE for a capture preset
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ if (mCapturePresetDevicesRole.count(audioSource) == 0) {
+ return NAME_NOT_FOUND;
+ }
+ auto devIt = mCapturePresetDevicesRole.at(audioSource).find(role);
+ if (devIt == mCapturePresetDevicesRole.at(audioSource).end()) {
+ ALOGV("%s no devices role(%d) for capture preset %u", __func__, role, audioSource);
+ return NAME_NOT_FOUND;
+ }
+
+ devices = devIt->second;
+ } break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
void EngineBase::dump(String8 *dst) const
{
mProductStrategies.dump(dst, 2);
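
The forceMatched removal in doRemoveDevicesRoleForCapturePreset() detects a partial match with a size check: after filtering, the list shrinks by exactly devices.size() only when every requested device was actually set. A minimal sketch of that check, assuming excludeDeviceTypeAddrsFrom() simply drops any element of the second list from the first:

    #include <algorithm>
    #include <iterator>
    #include <string>
    #include <vector>

    using Devices = std::vector<std::string>;

    // Assumed behaviour of excludeDeviceTypeAddrsFrom(): keep elements of `current`
    // that are not listed in `toRemove`.
    Devices exclude(const Devices& current, const Devices& toRemove) {
        Devices kept;
        std::copy_if(current.begin(), current.end(), std::back_inserter(kept),
                     [&](const std::string& d) {
                         return std::find(toRemove.begin(), toRemove.end(), d) == toRemove.end();
                     });
        return kept;
    }

    // forceMatched semantics: fail unless every device in `toRemove` was present.
    bool removeStrict(Devices& current, const Devices& toRemove) {
        Devices remaining = exclude(current, toRemove);
        if (remaining.size() != current.size() - toRemove.size()) {
            return false;  // some requested devices were not set for this role
        }
        current = std::move(remaining);
        return true;
    }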
diff --git a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index d45e71c..f64608d 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -34,6 +34,8 @@
using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
using StrategyVector = std::vector<product_strategy_t>;
using VolumeGroupVector = std::vector<volume_group_t>;
+using CapturePresetDevicesRoleMap =
+ std::map<audio_source_t, std::map<device_role_t, AudioDeviceTypeAddrVector>>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -332,6 +334,75 @@
virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
AudioDeviceTypeAddrVector &devices) const = 0;
+ /**
+ * @brief setDevicesRoleForCapturePreset sets devices role for a capture preset when available.
+ * To remove devices role, removeDevicesRoleForCapturePreset must be called. Calling
+ * clearDevicesRoleForCapturePreset will remove all devices for the role. When the devices role is
+ * successfully, previously set devices for the same role and capture preset will be removed.
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset. All device roles are defined at
+ * system/media/audio/include/system/audio_policy.h. DEVICE_ROLE_NONE is invalid
+ * for setting.
+ * @param devices the audio devices to be set
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the role of the devices for capture preset was set
+ */
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * @brief addDevicesRoleForCapturePreset adds devices role for a capture preset when available.
+ * To remove devices role, removeDevicesRoleForCapturePreset must be called. Calling
+ * clearDevicesRoleForCapturePreset will remove all devices for the role.
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset. All device roles are defined at
+ * system/media/audio/include/system/audio_policy.h. DEVICE_ROLE_NONE is invalid
+ * for setting.
+ * @param devices the audio devices to be added
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the role of the devices for capture preset was added
+ */
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * @brief removeDevicesRoleForCapturePreset removes the role of device(s) previously set
+ * for the given capture preset
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset
+ * @param devices the devices to be removed
+ * @return BAD_VALUE if 1) the capture preset is invalid, 2) role is invalid or 3) the list of
+ * devices to be removed are not all present as role for a capture preset
+ * or NO_ERROR if the devices for this role were removed
+ */
+ virtual status_t removeDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices) = 0;
+
+ /**
+ * @brief clearDevicesRoleForCapturePreset removes the role of all device(s) previously set
+ * for the given capture preset
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the devices for this role were removed
+ */
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ /**
+ * @brief getDevicesForRoleAndCapturePreset queries which devices have the specified role for
+ * the specified capture preset
+ * @param audioSource the capture preset to query
+ * @param role the role of the devices to query
+ * @param devices returns list of devices with matching role for the specified capture preset.
+ * DEVICE_ROLE_NONE is invalid as input.
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NAME_NOT_FOUND if no device for the role and capture preset was set
+ * or NO_ERROR if the devices parameter contains a list of devices
+ */
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const = 0;
+
virtual void dump(String8 *dst) const = 0;
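
From a caller's point of view the new capture-preset routing methods are used like the existing strategy ones: set or add preferred devices for a source, query them back, then remove or clear. A hedged usage sketch against a hypothetical non-null EngineInterface*; it assumes the declarations above and the usual audio system headers are in scope:

    // Sketch only: routeMicToBuiltinMic is an illustrative caller, not part of the change.
    void routeMicToBuiltinMic(EngineInterface* engine) {
        const AudioDeviceTypeAddrVector devices = {
                AudioDeviceTypeAddr(AUDIO_DEVICE_IN_BUILTIN_MIC, "")};
        if (engine->setDevicesRoleForCapturePreset(
                    AUDIO_SOURCE_MIC, DEVICE_ROLE_PREFERRED, devices) != NO_ERROR) {
            return;
        }
        AudioDeviceTypeAddrVector current;
        if (engine->getDevicesForRoleAndCapturePreset(
                    AUDIO_SOURCE_MIC, DEVICE_ROLE_PREFERRED, current) == NO_ERROR) {
            // `current` now holds the built-in mic entry set above.
        }
        engine->clearDevicesRoleForCapturePreset(AUDIO_SOURCE_MIC, DEVICE_ROLE_PREFERRED);
    }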
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 9a7fa8f..f060d45 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -52,13 +52,19 @@
def findBitPos(decimal):
pos = 0
i = 1
- while i != decimal:
+ while i < decimal:
i = i << 1
pos = pos + 1
if pos == 32:
return -1
- return pos
+ # TODO: b/168065706. This is just to fix the build. The problem of devices with
+ # multiple bits set must be addressed more generally in the configurable audio policy
+ # and parameter framework.
+ if i > decimal:
+ logging.info("Device:{} which has multiple bits set is skipped. b/168065706".format(decimal))
+ return -2
+ return pos
def generateXmlStructureFile(componentTypeDict, structureTypesFile, outputFile):
@@ -74,10 +80,12 @@
if bitparameters_node is not None:
ordered_values = OrderedDict(sorted(values_dict.items(), key=lambda x: x[1]))
for key, value in ordered_values.items():
- value_node = ET.SubElement(bitparameters_node, "BitParameter")
- value_node.set('Name', key)
- value_node.set('Size', "1")
- value_node.set('Pos', str(findBitPos(value)))
+ pos = findBitPos(value)
+ if pos >= 0:
+ value_node = ET.SubElement(bitparameters_node, "BitParameter")
+ value_node.set('Name', key)
+ value_node.set('Size', "1")
+ value_node.set('Pos', str(pos))
enum_parameter_node = component_type.find("EnumParameter")
if enum_parameter_node is not None:
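
findBitPos() now stops at the first power of two that is greater than or equal to the device value and flags multi-bit masks (b/168065706) instead of looping forever on them. An equivalent sketch in C++, kept in the same language as the other examples here:

    #include <cstdint>

    // Returns the bit position for a single-bit device mask, -1 if the value does
    // not fit in 32 bits, and -2 for values with multiple bits set (skipped by the
    // generator above, see b/168065706).
    int findBitPos(uint64_t value) {
        int pos = 0;
        uint64_t i = 1;
        while (i < value) {
            i <<= 1;
            ++pos;
            if (pos == 32) {
                return -1;
            }
        }
        return (i > value) ? -2 : pos;
    }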
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 84687fd..fc3b2a2 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2281,7 +2281,7 @@
sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
if (inputDesc == 0) {
ALOGW("%s no input for client %d", __FUNCTION__, portId);
- return BAD_VALUE;
+ return DEAD_OBJECT;
}
audio_io_handle_t input = inputDesc->mIoHandle;
sp<RecordClientDescriptor> client = inputDesc->getClient(portId);
@@ -3116,7 +3116,7 @@
devices[i].mType, devices[i].getAddress(), String8(),
AUDIO_FORMAT_DEFAULT, false /*allowToCreate*/, true /*matchAddress*/);
if (devDesc == nullptr || (predicate != nullptr && !predicate(devices[i].mType))) {
- ALOGE("%s: device type %#x address %s not supported or not an output device",
+ ALOGE("%s: device type %#x address %s not supported or not match predicate",
context, devices[i].mType, devices[i].getAddress());
return false;
}
@@ -3228,6 +3228,72 @@
return mEngine->getDevicesForRoleAndStrategy(strategy, role, devices);
}
+status_t AudioPolicyManager::setDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ ALOGV("%s() audioSource=%d role=%d %s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+ status_t status = mEngine->setDevicesRoleForCapturePreset(audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not set preferred devices %s for audio source %d role %d",
+ dumpAudioDeviceTypeAddrVector(devices).c_str(), audioSource, role);
+
+ return status;
+}
+
+status_t AudioPolicyManager::addDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ ALOGV("%s() audioSource=%d role=%d %s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+ status_t status = mEngine->addDevicesRoleForCapturePreset(audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not add preferred devices %s for audio source %d role %d",
+ dumpAudioDeviceTypeAddrVector(devices).c_str(), audioSource, role);
+
+ return status;
+}
+
+status_t AudioPolicyManager::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ ALOGV("%s() audioSource=%d role=%d devices=%s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+
+ status_t status = mEngine->removeDevicesRoleForCapturePreset(
+ audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not remove devices role (%d) for capture preset %d", role, audioSource);
+
+ return status;
+}
+
+status_t AudioPolicyManager::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) {
+ ALOGV("%s() audioSource=%d role=%d", __func__, audioSource, role);
+
+ status_t status = mEngine->clearDevicesRoleForCapturePreset(audioSource, role);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not clear devices role (%d) for capture preset %d", role, audioSource);
+
+ return status;
+}
+
+status_t AudioPolicyManager::getDevicesForRoleAndCapturePreset(
+ audio_source_t audioSource, device_role_t role, AudioDeviceTypeAddrVector &devices) {
+ return mEngine->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+}
+
status_t AudioPolicyManager::setUserIdDeviceAffinities(int userId,
const AudioDeviceTypeAddrVector& devices) {
ALOGI("%s() userId=%d num devices %zu", __func__, userId, devices.size());
@@ -5288,7 +5354,7 @@
if (status != OK) {
continue;
}
- if (client->getPrimaryMix() != primaryMix) {
+ if (client->getPrimaryMix() != primaryMix || client->hasLostPrimaryMix()) {
invalidate = true;
if (desc->isStrategyActive(psId)) {
maxLatency = desc->latency();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11077f1..217013f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -281,6 +281,25 @@
device_role_t role,
AudioDeviceTypeAddrVector &devices);
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role);
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
+
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
audio_port_handle_t *portId,
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 7d1ad63..14e5236 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -1534,4 +1534,55 @@
return NO_ERROR;
}
+status_t AudioPolicyService::setDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->setDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::addDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->addDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->clearDevicesRoleForCapturePreset(audioSource, role);
+}
+
+status_t AudioPolicyService::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a851863..0b218c2 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -240,6 +240,25 @@
device_role_t role,
AudioDeviceTypeAddrVector &devices);
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role);
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
+
virtual status_t setUserIdDeviceAffinities(int userId,
const AudioDeviceTypeAddrVector& devices);
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index efdb241..ca03e1f 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -18,7 +18,10 @@
"libxml2",
],
- static_libs: ["libaudiopolicycomponents"],
+ static_libs: [
+ "libaudiopolicycomponents",
+ "libgmock"
+ ],
header_libs: [
"libaudiopolicycommon",
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a0074bc..ed9ec8c 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -20,6 +20,7 @@
#include <unistd.h>
#include <gtest/gtest.h>
+#include <gmock/gmock.h>
#define LOG_TAG "APM_Test"
#include <Serializer.h>
@@ -36,6 +37,7 @@
#include "AudioPolicyTestManager.h"
using namespace android;
+using testing::UnorderedElementsAre;
TEST(AudioPolicyManagerTestInit, EngineFailure) {
AudioPolicyTestClient client;
@@ -1188,3 +1190,109 @@
EXPECT_GT(mClient->getAudioPortListUpdateCount(), prevAudioPortListUpdateCount);
EXPECT_GT(mManager->getAudioPortGeneration(), prevAudioPortGeneration);
}
+
+using DevicesRoleForCapturePresetParam = std::tuple<audio_source_t, device_role_t>;
+
+class AudioPolicyManagerDevicesRoleForCapturePresetTest
+ : public AudioPolicyManagerTestWithConfigurationFile,
+ public testing::WithParamInterface<DevicesRoleForCapturePresetParam> {
+protected:
+ // `inputDevice` and `inputDevice2` are the audio device types used when setting the
+ // device role. They must be declared in test_audio_policy_configuration.xml.
+ AudioDeviceTypeAddr inputDevice = AudioDeviceTypeAddr(AUDIO_DEVICE_IN_BUILTIN_MIC, "");
+ AudioDeviceTypeAddr inputDevice2 = AudioDeviceTypeAddr(AUDIO_DEVICE_IN_HDMI, "");
+};
+
+TEST_P(AudioPolicyManagerDevicesRoleForCapturePresetTest, DevicesRoleForCapturePreset) {
+ const audio_source_t audioSource = std::get<0>(GetParam());
+ const device_role_t role = std::get<1>(GetParam());
+
+ // Test invalid device when setting
+ const AudioDeviceTypeAddr outputDevice(AUDIO_DEVICE_OUT_SPEAKER, "");
+ const AudioDeviceTypeAddrVector outputDevices = {outputDevice};
+ ASSERT_EQ(BAD_VALUE,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ ASSERT_EQ(BAD_VALUE,
+ mManager->addDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ AudioDeviceTypeAddrVector devices;
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ ASSERT_TRUE(devices.empty());
+ ASSERT_EQ(BAD_VALUE,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+
+ // Without setting, call get/remove/clear must fail
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->clearDevicesRoleForCapturePreset(audioSource, role));
+
+ // Test set/get devices role
+ const AudioDeviceTypeAddrVector inputDevices = {inputDevice};
+ ASSERT_EQ(NO_ERROR,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice));
+
+ // Test setting will change the previously set devices
+ const AudioDeviceTypeAddrVector inputDevices2 = {inputDevice2};
+ ASSERT_EQ(NO_ERROR,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, inputDevices2));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice2));
+
+ // Test add devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->addDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice, inputDevice2));
+
+ // Test remove devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice2));
+
+ // Test remove devices that are not set as the device role
+ ASSERT_EQ(BAD_VALUE,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+
+ // Test clear devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->clearDevicesRoleForCapturePreset(audioSource, role));
+ devices.clear();
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+}
+
+INSTANTIATE_TEST_CASE_P(
+ DevicesRoleForCapturePresetOperation,
+ AudioPolicyManagerDevicesRoleForCapturePresetTest,
+ testing::Values(
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_MIC, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_UPLINK,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_DOWNLINK,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_CALL, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_CAMCORDER, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_RECOGNITION,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_COMMUNICATION,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_REMOTE_SUBMIX,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_UNPROCESSED, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_PERFORMANCE,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_ECHO_REFERENCE,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_FM_TUNER, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_HOTWORD, DEVICE_ROLE_PREFERRED})
+ )
+ );
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index adafbda..138e429 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -3692,9 +3692,14 @@
__FUNCTION__, cameraId.string());
return;
}
+
+ // Collect the logical cameras without holding mStatusLock in updateStatus
+ // as that can lead to a deadlock (b/162192331).
+ auto logicalCameraIds = getLogicalCameras(cameraId);
// Update the status for this camera state, then send the onStatusChangedCallbacks to each
// of the listeners with both the mStatusLock and mStatusListenerLock held
- state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3]
+ state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3,
+ &logicalCameraIds]
(const String8& cameraId, StatusInternal status) {
if (status != StatusInternal::ENUMERATING) {
@@ -3714,8 +3719,8 @@
}
Mutex::Autolock lock(mStatusListenerLock);
-
- notifyPhysicalCameraStatusLocked(mapToInterface(status), cameraId, deviceKind);
+ notifyPhysicalCameraStatusLocked(mapToInterface(status), String16(cameraId),
+ logicalCameraIds, deviceKind);
for (auto& listener : mListenerList) {
bool isVendorListener = listener->isVendorListener();
@@ -3833,8 +3838,9 @@
return OK;
}
-void CameraService::notifyPhysicalCameraStatusLocked(int32_t status, const String8& cameraId,
- SystemCameraKind deviceKind) {
+std::list<String16> CameraService::getLogicalCameras(
+ const String8& physicalCameraId) {
+ std::list<String16> retList;
Mutex::Autolock lock(mCameraStatesLock);
for (const auto& state : mCameraStates) {
std::vector<std::string> physicalCameraIds;
@@ -3842,26 +3848,39 @@
// This is not a logical multi-camera.
continue;
}
- if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(), cameraId.c_str())
+ if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(), physicalCameraId.c_str())
== physicalCameraIds.end()) {
// cameraId is not a physical camera of this logical multi-camera.
continue;
}
- String16 id16(state.first), physicalId16(cameraId);
+ retList.emplace_back(String16(state.first));
+ }
+ return retList;
+}
+
+void CameraService::notifyPhysicalCameraStatusLocked(int32_t status,
+ const String16& physicalCameraId, const std::list<String16>& logicalCameraIds,
+ SystemCameraKind deviceKind) {
+ // mStatusListenerLock is expected to be locked
+ for (const auto& logicalCameraId : logicalCameraIds) {
for (auto& listener : mListenerList) {
+ // Note: we check only the deviceKind of the physical camera id
+ // since logical camera ids and their physical camera ids are
+ // guaranteed to have the same system camera kind.
if (shouldSkipStatusUpdates(deviceKind, listener->isVendorListener(),
listener->getListenerPid(), listener->getListenerUid())) {
ALOGV("Skipping discovery callback for system-only camera device %s",
- cameraId.c_str());
+ String8(physicalCameraId).c_str());
continue;
}
listener->getListener()->onPhysicalCameraStatusChanged(status,
- id16, physicalId16);
+ logicalCameraId, physicalCameraId);
}
}
}
+
void CameraService::blockClientsForUid(uid_t uid) {
const auto clients = mActiveClientManager.getAll();
for (auto& current : clients) {
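
The getLogicalCameras()/notifyPhysicalCameraStatusLocked() split above is purely about lock ordering: the logical-camera list, which needs mCameraStatesLock, is gathered before updateStatus() takes mStatusLock, so mCameraStatesLock is never acquired inside the status callback (the deadlock described in b/162192331). A condensed sketch of that "collect first, notify later" pattern with hypothetical names:

    #include <list>
    #include <mutex>
    #include <string>

    std::mutex gStatesLock;    // stands in for mCameraStatesLock
    std::mutex gListenerLock;  // stands in for mStatusListenerLock

    std::list<std::string> collectLogicalIds(const std::string& physicalId) {
        std::lock_guard<std::mutex> lock(gStatesLock);
        (void)physicalId;  // placeholder: real code matches each state's physical ids
        // ... walk the camera states and gather logical ids backed by physicalId ...
        return {};
    }

    void onPhysicalCameraStatusChanged(const std::string& physicalId) {
        // Gather while holding only gStatesLock ...
        const std::list<std::string> logicalIds = collectLogicalIds(physicalId);
        // ... then notify under gListenerLock; the two locks are never nested.
        std::lock_guard<std::mutex> lock(gListenerLock);
        for (const std::string& id : logicalIds) {
            (void)id;  // notify each registered listener for this logical camera
        }
    }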
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 3d3b7dd..6f37e9f 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -49,6 +49,7 @@
#include <set>
#include <string>
+#include <list>
#include <map>
#include <memory>
#include <optional>
@@ -999,8 +1000,13 @@
hardware::camera::common::V1_0::TorchModeStatus status);
// notify physical camera status when the physical camera is public.
- void notifyPhysicalCameraStatusLocked(int32_t status, const String8& cameraId,
- SystemCameraKind deviceKind);
+ // Expects mStatusListenerLock to be locked.
+ void notifyPhysicalCameraStatusLocked(int32_t status, const String16& physicalCameraId,
+ const std::list<String16>& logicalCameraIds, SystemCameraKind deviceKind);
+
+ // get list of logical cameras which are backed by physicalCameraId
+ std::list<String16> getLogicalCameras(const String8& physicalCameraId);
+
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 022d686..e80838b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -21,6 +21,7 @@
#include <cutils/properties.h>
#include <utils/CameraThreadState.h>
#include <utils/Log.h>
+#include <utils/SessionConfigurationUtils.h>
#include <utils/Trace.h>
#include <gui/Surface.h>
#include <camera/camera2/CaptureRequest.h>
@@ -492,7 +493,8 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- res = checkOperatingMode(operatingMode, mDevice->info(), mCameraIdStr);
+ res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -550,247 +552,6 @@
return res;
}
-binder::Status CameraDeviceClient::checkSurfaceType(size_t numBufferProducers,
- bool deferredConsumer, int surfaceType) {
- if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
- ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
- __FUNCTION__, numBufferProducers, MAX_SURFACES_PER_STREAM);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
- } else if ((numBufferProducers == 0) && (!deferredConsumer)) {
- ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "No valid consumers.");
- }
-
- bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
- (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
-
- if (deferredConsumer && !validSurfaceType) {
- ALOGE("%s: Target surface has invalid surfaceType = %d.", __FUNCTION__, surfaceType);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
- }
-
- return binder::Status::ok();
-}
-
-binder::Status CameraDeviceClient::checkPhysicalCameraId(
- const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
- const String8 &logicalCameraId) {
- if (physicalCameraId.size() == 0) {
- return binder::Status::ok();
- }
- if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
- physicalCameraId.string()) == physicalCameraIds.end()) {
- String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
- logicalCameraId.string(), physicalCameraId.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- return binder::Status::ok();
-}
-
-binder::Status CameraDeviceClient::checkOperatingMode(int operatingMode,
- const CameraMetadata &staticInfo, const String8 &cameraId) {
- if (operatingMode < 0) {
- String8 msg = String8::format(
- "Camera %s: Invalid operating mode %d requested", cameraId.string(), operatingMode);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
-
- bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
- if (isConstrainedHighSpeed) {
- camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
- bool isConstrainedHighSpeedSupported = false;
- for(size_t i = 0; i < entry.count; ++i) {
- uint8_t capability = entry.data.u8[i];
- if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
- isConstrainedHighSpeedSupported = true;
- break;
- }
- }
- if (!isConstrainedHighSpeedSupported) {
- String8 msg = String8::format(
- "Camera %s: Try to create a constrained high speed configuration on a device"
- " that doesn't support it.", cameraId.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
- }
-
- return binder::Status::ok();
-}
-
-void CameraDeviceClient::mapStreamInfo(const OutputStreamInfo &streamInfo,
- camera3_stream_rotation_t rotation, String8 physicalId,
- hardware::camera::device::V3_4::Stream *stream /*out*/) {
- if (stream == nullptr) {
- return;
- }
-
- stream->v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
- stream->v3_2.width = streamInfo.width;
- stream->v3_2.height = streamInfo.height;
- stream->v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
- auto u = streamInfo.consumerUsage;
- camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
- stream->v3_2.usage = Camera3Device::mapToConsumerUsage(u);
- stream->v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
- stream->v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
- stream->v3_2.id = -1; // Invalid stream id
- stream->physicalCameraId = std::string(physicalId.string());
- stream->bufferSize = 0;
-}
-
-binder::Status
-CameraDeviceClient::convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
- const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
- metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
- bool *unsupported) {
- auto operatingMode = sessionConfiguration.getOperatingMode();
- binder::Status res = checkOperatingMode(operatingMode, deviceInfo, logicalCameraId);
- if (!res.isOk()) {
- return res;
- }
-
- if (unsupported == nullptr) {
- String8 msg("unsupported nullptr");
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- *unsupported = false;
- auto ret = Camera3Device::mapToStreamConfigurationMode(
- static_cast<camera3_stream_configuration_mode_t> (operatingMode),
- /*out*/ &streamConfiguration.operationMode);
- if (ret != OK) {
- String8 msg = String8::format(
- "Camera %s: Failed mapping operating mode %d requested: %s (%d)",
- logicalCameraId.string(), operatingMode, strerror(-ret), ret);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
-
- bool isInputValid = (sessionConfiguration.getInputWidth() > 0) &&
- (sessionConfiguration.getInputHeight() > 0) &&
- (sessionConfiguration.getInputFormat() > 0);
- auto outputConfigs = sessionConfiguration.getOutputConfigurations();
- size_t streamCount = outputConfigs.size();
- streamCount = isInputValid ? streamCount + 1 : streamCount;
- streamConfiguration.streams.resize(streamCount);
- size_t streamIdx = 0;
- if (isInputValid) {
- streamConfiguration.streams[streamIdx++] = {{/*streamId*/0,
- hardware::camera::device::V3_2::StreamType::INPUT,
- static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
- static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
- Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
- /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
- hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
- /*physicalId*/ nullptr, /*bufferSize*/0};
- }
-
- for (const auto &it : outputConfigs) {
- const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
- it.getGraphicBufferProducers();
- bool deferredConsumer = it.isDeferred();
- String8 physicalCameraId = String8(it.getPhysicalCameraId());
- size_t numBufferProducers = bufferProducers.size();
- bool isStreamInfoValid = false;
- OutputStreamInfo streamInfo;
-
- res = checkSurfaceType(numBufferProducers, deferredConsumer, it.getSurfaceType());
- if (!res.isOk()) {
- return res;
- }
- res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
- logicalCameraId);
- if (!res.isOk()) {
- return res;
- }
-
- if (deferredConsumer) {
- streamInfo.width = it.getWidth();
- streamInfo.height = it.getHeight();
- streamInfo.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- streamInfo.dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
- auto surfaceType = it.getSurfaceType();
- streamInfo.consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
- if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
- streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
- }
- mapStreamInfo(streamInfo, CAMERA3_STREAM_ROTATION_0, physicalCameraId,
- &streamConfiguration.streams[streamIdx++]);
- isStreamInfoValid = true;
-
- if (numBufferProducers == 0) {
- continue;
- }
- }
-
- for (auto& bufferProducer : bufferProducers) {
- sp<Surface> surface;
- const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
- logicalCameraId,
- physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo );
-
- if (!res.isOk())
- return res;
-
- if (!isStreamInfoValid) {
- bool isDepthCompositeStream =
- camera3::DepthCompositeStream::isDepthCompositeStream(surface);
- bool isHeicCompositeStream =
- camera3::HeicCompositeStream::isHeicCompositeStream(surface);
- if (isDepthCompositeStream || isHeicCompositeStream) {
- // We need to take in to account that composite streams can have
- // additional internal camera streams.
- std::vector<OutputStreamInfo> compositeStreams;
- if (isDepthCompositeStream) {
- ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
- deviceInfo, &compositeStreams);
- } else {
- ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
- deviceInfo, &compositeStreams);
- }
- if (ret != OK) {
- String8 msg = String8::format(
- "Camera %s: Failed adding composite streams: %s (%d)",
- logicalCameraId.string(), strerror(-ret), ret);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
-
- if (compositeStreams.size() == 0) {
- // No internal streams means composite stream not
- // supported.
- *unsupported = true;
- return binder::Status::ok();
- } else if (compositeStreams.size() > 1) {
- streamCount += compositeStreams.size() - 1;
- streamConfiguration.streams.resize(streamCount);
- }
-
- for (const auto& compositeStream : compositeStreams) {
- mapStreamInfo(compositeStream,
- static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
- }
- } else {
- mapStreamInfo(streamInfo,
- static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
- }
- isStreamInfoValid = true;
- }
- }
- }
- return binder::Status::ok();
-}
-
binder::Status CameraDeviceClient::isSessionConfigurationSupported(
const SessionConfiguration& sessionConfiguration, bool *status /*out*/) {
ATRACE_CALL();
@@ -806,7 +567,8 @@
}
auto operatingMode = sessionConfiguration.getOperatingMode();
- res = checkOperatingMode(operatingMode, mDevice->info(), mCameraIdStr);
+ res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -821,8 +583,9 @@
metadataGetter getMetadata = [this](const String8 &id) {return mDevice->infoPhysical(id);};
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = convertToHALStreamCombination(sessionConfiguration, mCameraIdStr,
- mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration, &earlyExit);
+ res = SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
+ mCameraIdStr, mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration,
+ &earlyExit);
if (!res.isOk()) {
return res;
}
@@ -970,7 +733,7 @@
String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
- res = checkSurfaceType(numBufferProducers, deferredConsumer,
+ res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
outputConfiguration.getSurfaceType());
if (!res.isOk()) {
return res;
@@ -981,7 +744,8 @@
}
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId, mCameraIdStr);
+ res = SessionConfigurationUtils::checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -1009,8 +773,8 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
- mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo, isStreamInfoValid,
+ surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
if (!res.isOk())
return res;
@@ -1313,8 +1077,9 @@
for (size_t i = 0; i < newOutputsMap.size(); i++) {
OutputStreamInfo outInfo;
sp<Surface> surface;
- res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
- newOutputsMap.valueAt(i), mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false,
+ surface, newOutputsMap.valueAt(i), mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId));
if (!res.isOk())
return res;
@@ -1364,226 +1129,6 @@
return res;
}
-bool CameraDeviceClient::isPublicFormat(int32_t format)
-{
- switch(format) {
- case HAL_PIXEL_FORMAT_RGBA_8888:
- case HAL_PIXEL_FORMAT_RGBX_8888:
- case HAL_PIXEL_FORMAT_RGB_888:
- case HAL_PIXEL_FORMAT_RGB_565:
- case HAL_PIXEL_FORMAT_BGRA_8888:
- case HAL_PIXEL_FORMAT_YV12:
- case HAL_PIXEL_FORMAT_Y8:
- case HAL_PIXEL_FORMAT_Y16:
- case HAL_PIXEL_FORMAT_RAW16:
- case HAL_PIXEL_FORMAT_RAW10:
- case HAL_PIXEL_FORMAT_RAW12:
- case HAL_PIXEL_FORMAT_RAW_OPAQUE:
- case HAL_PIXEL_FORMAT_BLOB:
- case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
- case HAL_PIXEL_FORMAT_YCbCr_420_888:
- case HAL_PIXEL_FORMAT_YCbCr_422_SP:
- case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- case HAL_PIXEL_FORMAT_YCbCr_422_I:
- return true;
- default:
- return false;
- }
-}
-
-binder::Status CameraDeviceClient::createSurfaceFromGbp(
- OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
- const String8 &cameraId, const CameraMetadata &physicalCameraMetadata) {
-
- // bufferProducer must be non-null
- if (gbp == nullptr) {
- String8 msg = String8::format("Camera %s: Surface is NULL", cameraId.string());
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- // HACK b/10949105
- // Query consumer usage bits to set async operation mode for
- // GLConsumer using controlledByApp parameter.
- bool useAsync = false;
- uint64_t consumerUsage = 0;
- status_t err;
- if ((err = gbp->getConsumerUsage(&consumerUsage)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
- ALOGW("%s: Camera %s with consumer usage flag: %" PRIu64 ": Forcing asynchronous mode for stream",
- __FUNCTION__, cameraId.string(), consumerUsage);
- useAsync = true;
- }
-
- uint64_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_RENDERSCRIPT;
- uint64_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
- GraphicBuffer::USAGE_HW_TEXTURE |
- GraphicBuffer::USAGE_HW_COMPOSER;
- bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
- (consumerUsage & allowedFlags) != 0;
-
- surface = new Surface(gbp, useAsync);
- ANativeWindow *anw = surface.get();
-
- int width, height, format;
- android_dataspace dataSpace;
- if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
- reinterpret_cast<int*>(&dataSpace))) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
-
- // FIXME: remove this override since the default format should be
- // IMPLEMENTATION_DEFINED. b/9487482 & b/35317944
- if ((format >= HAL_PIXEL_FORMAT_RGBA_8888 && format <= HAL_PIXEL_FORMAT_BGRA_8888) &&
- ((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
- ((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
- ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
- __FUNCTION__, cameraId.string(), format);
- format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- }
- // Round dimensions to the nearest dimensions available for this format
- if (flexibleConsumer && isPublicFormat(format) &&
- !CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, physicalCameraMetadata, /*out*/&width, /*out*/&height)) {
- String8 msg = String8::format("Camera %s: No supported stream configurations with "
- "format %#x defined, failed to create output stream",
- cameraId.string(), format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
-
- if (!isStreamInfoValid) {
- streamInfo.width = width;
- streamInfo.height = height;
- streamInfo.format = format;
- streamInfo.dataSpace = dataSpace;
- streamInfo.consumerUsage = consumerUsage;
- return binder::Status::ok();
- }
- if (width != streamInfo.width) {
- String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
- cameraId.string(), width, streamInfo.width);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (height != streamInfo.height) {
- String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
- cameraId.string(), height, streamInfo.height);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (format != streamInfo.format) {
- String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
- cameraId.string(), format, streamInfo.format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
- if (dataSpace != streamInfo.dataSpace) {
- String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
- cameraId.string(), dataSpace, streamInfo.dataSpace);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- //At the native side, there isn't a way to check whether 2 surfaces come from the same
- //surface class type. Use usage flag to approximate the comparison.
- if (consumerUsage != streamInfo.consumerUsage) {
- String8 msg = String8::format(
- "Camera %s:Surface usage flag doesn't match %" PRIu64 " vs %" PRIu64 "",
- cameraId.string(), consumerUsage, streamInfo.consumerUsage);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- }
- return binder::Status::ok();
-}
-
-bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
- int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
- /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
-
- camera_metadata_ro_entry streamConfigs =
- (dataSpace == HAL_DATASPACE_DEPTH) ?
- info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
- (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
- info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
- info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
-
- int32_t bestWidth = -1;
- int32_t bestHeight = -1;
-
- // Iterate through listed stream configurations and find the one with the smallest euclidean
- // distance from the given dimensions for the given format.
- for (size_t i = 0; i < streamConfigs.count; i += 4) {
- int32_t fmt = streamConfigs.data.i32[i];
- int32_t w = streamConfigs.data.i32[i + 1];
- int32_t h = streamConfigs.data.i32[i + 2];
-
- // Ignore input/output type for now
- if (fmt == format) {
- if (w == width && h == height) {
- bestWidth = width;
- bestHeight = height;
- break;
- } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
- CameraDeviceClient::euclidDistSquare(w, h, width, height) <
- CameraDeviceClient::euclidDistSquare(bestWidth, bestHeight, width, height))) {
- bestWidth = w;
- bestHeight = h;
- }
- }
- }
-
- if (bestWidth == -1) {
- // Return false if no configurations for this format were listed
- return false;
- }
-
- // Set the outputs to the closet width/height
- if (outWidth != NULL) {
- *outWidth = bestWidth;
- }
- if (outHeight != NULL) {
- *outHeight = bestHeight;
- }
-
- // Return true if at least one configuration for this format was listed
- return true;
-}
-
-int64_t CameraDeviceClient::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
- int64_t d0 = x0 - x1;
- int64_t d1 = y0 - y1;
- return d0 * d0 + d1 * d1;
-}
-
// Create a request object from a template.
binder::Status CameraDeviceClient::createDefaultRequest(int templateId,
/*out*/
@@ -1896,8 +1441,9 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
- surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
+ true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
+ mDevice->infoPhysical(physicalId));
if (!res.isOk())
return res;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5cd16ee..2807aee 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -204,16 +204,6 @@
virtual void notifyRequestQueueEmpty();
virtual void notifyRepeatingRequestError(long lastFrameNumber);
- // utility function to convert AIDL SessionConfiguration to HIDL
- // streamConfiguration. Also checks for validity of SessionConfiguration and
- // returns a non-ok binder::Status if the passed in session configuration
- // isn't valid.
- static binder::Status
- convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
- const String8 &cameraId, const CameraMetadata &deviceInfo,
- metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
- bool *earlyExit);
/**
* Interface used by independent components of CameraDeviceClient.
*/
@@ -266,18 +256,8 @@
/** Utility members */
binder::Status checkPidStatus(const char* checkLocation);
- static binder::Status checkOperatingMode(int operatingMode, const CameraMetadata &staticInfo,
- const String8 &cameraId);
- static binder::Status checkSurfaceType(size_t numBufferProducers, bool deferredConsumer,
- int surfaceType);
- static void mapStreamInfo(const OutputStreamInfo &streamInfo,
- camera3_stream_rotation_t rotation, String8 physicalId,
- hardware::camera::device::V3_4::Stream *stream /*out*/);
bool enforceRequestPermissions(CameraMetadata& metadata);
- // Find the square of the euclidean distance between two points
- static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
-
// Create an output stream with surface deferred for future.
binder::Status createDeferredSurfaceStreamLocked(
const hardware::camera2::params::OutputConfiguration &outputConfiguration,
@@ -288,33 +268,11 @@
// cases.
binder::Status setStreamTransformLocked(int streamId);
- // Find the closest dimensions for a given format in available stream configurations with
- // a width <= ROUNDING_WIDTH_CAP
- static const int32_t ROUNDING_WIDTH_CAP = 1920;
- static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
- android_dataspace dataSpace, const CameraMetadata& info,
- /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
-
- //check if format is not custom format
- static bool isPublicFormat(int32_t format);
-
- // Create a Surface from an IGraphicBufferProducer. Returns error if
- // IGraphicBufferProducer's property doesn't match with streamInfo
- static binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp, const String8 &cameraId,
- const CameraMetadata &physicalCameraMetadata);
-
-
// Utility method to insert the surface into SurfaceMap
binder::Status insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
/*out*/SurfaceMap* surfaceMap, /*out*/Vector<int32_t>* streamIds,
/*out*/int32_t* currentStreamId);
- // Check that the physicalCameraId passed in is spported by the camera
- // device.
- static binder::Status checkPhysicalCameraId(const std::vector<std::string> &physicalCameraIds,
- const String8 &physicalCameraId, const String8 &logicalCameraId);
-
// IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
@@ -346,7 +304,6 @@
KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
- static const int32_t MAX_SURFACES_PER_STREAM = 4;
sp<CameraProviderManager> mProviderManager;
};
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 888671c..ba68a63 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -14,20 +14,493 @@
* limitations under the License.
*/
#include "SessionConfigurationUtils.h"
-#include "../api2/CameraDeviceClient.h"
+#include "../api2/DepthCompositeStream.h"
+#include "../api2/HeicCompositeStream.h"
+#include "common/CameraDeviceBase.h"
+#include "../CameraService.h"
+#include "device3/Camera3Device.h"
+#include "device3/Camera3OutputStream.h"
+
+// Convenience methods for constructing binder::Status objects for error returns
+
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+ __VA_ARGS__))
+
+using android::camera3::OutputStreamInfo;
+using android::hardware::camera2::ICameraDeviceUser;
namespace android {
+int64_t SessionConfigurationUtils::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
+ int64_t d0 = x0 - x1;
+ int64_t d1 = y0 - y1;
+ return d0 * d0 + d1 * d1;
+}
+
+bool SessionConfigurationUtils::roundBufferDimensionNearest(int32_t width, int32_t height,
+ int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
+ /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
+
+ camera_metadata_ro_entry streamConfigs =
+ (dataSpace == HAL_DATASPACE_DEPTH) ?
+ info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
+ (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
+ info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
+ info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+
+ int32_t bestWidth = -1;
+ int32_t bestHeight = -1;
+
+ // Iterate through listed stream configurations and find the one with the smallest euclidean
+ // distance from the given dimensions for the given format.
+ for (size_t i = 0; i < streamConfigs.count; i += 4) {
+ int32_t fmt = streamConfigs.data.i32[i];
+ int32_t w = streamConfigs.data.i32[i + 1];
+ int32_t h = streamConfigs.data.i32[i + 2];
+
+ // Ignore input/output type for now
+ if (fmt == format) {
+ if (w == width && h == height) {
+ bestWidth = width;
+ bestHeight = height;
+ break;
+ } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
+ SessionConfigurationUtils::euclidDistSquare(w, h, width, height) <
+ SessionConfigurationUtils::euclidDistSquare(bestWidth, bestHeight, width,
+ height))) {
+ bestWidth = w;
+ bestHeight = h;
+ }
+ }
+ }
+
+ if (bestWidth == -1) {
+ // Return false if no configurations for this format were listed
+ return false;
+ }
+
+ // Set the outputs to the closest width/height
+ if (outWidth != NULL) {
+ *outWidth = bestWidth;
+ }
+ if (outHeight != NULL) {
+ *outHeight = bestHeight;
+ }
+
+ // Return true if at least one configuration for this format was listed
+ return true;
+}
+
+bool SessionConfigurationUtils::isPublicFormat(int32_t format)
+{
+ switch(format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ case HAL_PIXEL_FORMAT_RGB_888:
+ case HAL_PIXEL_FORMAT_RGB_565:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ case HAL_PIXEL_FORMAT_YV12:
+ case HAL_PIXEL_FORMAT_Y8:
+ case HAL_PIXEL_FORMAT_Y16:
+ case HAL_PIXEL_FORMAT_RAW16:
+ case HAL_PIXEL_FORMAT_RAW10:
+ case HAL_PIXEL_FORMAT_RAW12:
+ case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+ case HAL_PIXEL_FORMAT_BLOB:
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ return true;
+ default:
+ return false;
+ }
+}
+
+binder::Status SessionConfigurationUtils::createSurfaceFromGbp(
+ OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8 &cameraId, const CameraMetadata &physicalCameraMetadata) {
+
+ // bufferProducer must be non-null
+ if (gbp == nullptr) {
+ String8 msg = String8::format("Camera %s: Surface is NULL", cameraId.string());
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ // HACK b/10949105
+ // Query consumer usage bits to set async operation mode for
+ // GLConsumer using controlledByApp parameter.
+ bool useAsync = false;
+ uint64_t consumerUsage = 0;
+ status_t err;
+ if ((err = gbp->getConsumerUsage(&consumerUsage)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+ ALOGW("%s: Camera %s with consumer usage flag: %" PRIu64 ": Forcing asynchronous mode for "
+ "stream", __FUNCTION__, cameraId.string(), consumerUsage);
+ useAsync = true;
+ }
+
+ uint64_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+ GRALLOC_USAGE_RENDERSCRIPT;
+ uint64_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+ GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_HW_COMPOSER;
+ bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+ (consumerUsage & allowedFlags) != 0;
+
+ surface = new Surface(gbp, useAsync);
+ ANativeWindow *anw = surface.get();
+
+ int width, height, format;
+ android_dataspace dataSpace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+ reinterpret_cast<int*>(&dataSpace))) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+
+ // FIXME: remove this override since the default format should be
+ // IMPLEMENTATION_DEFINED. b/9487482 & b/35317944
+ if ((format >= HAL_PIXEL_FORMAT_RGBA_8888 && format <= HAL_PIXEL_FORMAT_BGRA_8888) &&
+ ((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
+ ((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
+ ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+ __FUNCTION__, cameraId.string(), format);
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+ // Round dimensions to the nearest dimensions available for this format
+ if (flexibleConsumer && isPublicFormat(format) &&
+ !SessionConfigurationUtils::roundBufferDimensionNearest(width, height,
+ format, dataSpace, physicalCameraMetadata, /*out*/&width, /*out*/&height)) {
+ String8 msg = String8::format("Camera %s: No supported stream configurations with "
+ "format %#x defined, failed to create output stream",
+ cameraId.string(), format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (!isStreamInfoValid) {
+ streamInfo.width = width;
+ streamInfo.height = height;
+ streamInfo.format = format;
+ streamInfo.dataSpace = dataSpace;
+ streamInfo.consumerUsage = consumerUsage;
+ return binder::Status::ok();
+ }
+ if (width != streamInfo.width) {
+ String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+ cameraId.string(), width, streamInfo.width);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (height != streamInfo.height) {
+ String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+ cameraId.string(), height, streamInfo.height);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != streamInfo.format) {
+ String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+ cameraId.string(), format, streamInfo.format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (dataSpace != streamInfo.dataSpace) {
+ String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+ cameraId.string(), dataSpace, streamInfo.dataSpace);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ //At the native side, there isn't a way to check whether 2 surfaces come from the same
+ //surface class type. Use usage flag to approximate the comparison.
+ if (consumerUsage != streamInfo.consumerUsage) {
+ String8 msg = String8::format(
+ "Camera %s:Surface usage flag doesn't match %" PRIu64 " vs %" PRIu64 "",
+ cameraId.string(), consumerUsage, streamInfo.consumerUsage);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ }
+ return binder::Status::ok();
+}
+
+
+void SessionConfigurationUtils::mapStreamInfo(const OutputStreamInfo &streamInfo,
+ camera3_stream_rotation_t rotation, String8 physicalId,
+ hardware::camera::device::V3_4::Stream *stream /*out*/) {
+ if (stream == nullptr) {
+ return;
+ }
+
+ stream->v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
+ stream->v3_2.width = streamInfo.width;
+ stream->v3_2.height = streamInfo.height;
+ stream->v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
+ auto u = streamInfo.consumerUsage;
+ camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
+ stream->v3_2.usage = Camera3Device::mapToConsumerUsage(u);
+ stream->v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
+ stream->v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
+ stream->v3_2.id = -1; // Invalid stream id
+ stream->physicalCameraId = std::string(physicalId.string());
+ stream->bufferSize = 0;
+}
+
+binder::Status SessionConfigurationUtils::checkPhysicalCameraId(
+ const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+ const String8 &logicalCameraId) {
+ if (physicalCameraId.size() == 0) {
+ return binder::Status::ok();
+ }
+ if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
+ physicalCameraId.string()) == physicalCameraIds.end()) {
+ String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
+ logicalCameraId.string(), physicalCameraId.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ return binder::Status::ok();
+}
+
+binder::Status SessionConfigurationUtils::checkSurfaceType(size_t numBufferProducers,
+ bool deferredConsumer, int surfaceType) {
+ if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+ ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+ __FUNCTION__, numBufferProducers, MAX_SURFACES_PER_STREAM);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+ } else if ((numBufferProducers == 0) && (!deferredConsumer)) {
+ ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "No valid consumers.");
+ }
+
+ bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
+ (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
+ if (deferredConsumer && !validSurfaceType) {
+ ALOGE("%s: Target surface has invalid surfaceType = %d.", __FUNCTION__, surfaceType);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
+ }
+
+ return binder::Status::ok();
+}
+
+binder::Status SessionConfigurationUtils::checkOperatingMode(int operatingMode,
+ const CameraMetadata &staticInfo, const String8 &cameraId) {
+ if (operatingMode < 0) {
+ String8 msg = String8::format(
+ "Camera %s: Invalid operating mode %d requested", cameraId.string(), operatingMode);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+
+ bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
+ if (isConstrainedHighSpeed) {
+ camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ bool isConstrainedHighSpeedSupported = false;
+ for(size_t i = 0; i < entry.count; ++i) {
+ uint8_t capability = entry.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
+ isConstrainedHighSpeedSupported = true;
+ break;
+ }
+ }
+ if (!isConstrainedHighSpeedSupported) {
+ String8 msg = String8::format(
+ "Camera %s: Try to create a constrained high speed configuration on a device"
+ " that doesn't support it.", cameraId.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+ }
+
+ return binder::Status::ok();
+}
+
binder::Status
SessionConfigurationUtils::convertToHALStreamCombination(
const SessionConfiguration& sessionConfiguration,
const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration, bool *earlyExit) {
- // TODO: http://b/148329298 Move the other dependencies from
- // CameraDeviceClient into SessionConfigurationUtils.
- return CameraDeviceClient::convertToHALStreamCombination(sessionConfiguration, logicalCameraId,
- deviceInfo, getMetadata, physicalCameraIds, streamConfiguration, earlyExit);
+
+ auto operatingMode = sessionConfiguration.getOperatingMode();
+ binder::Status res = checkOperatingMode(operatingMode, deviceInfo, logicalCameraId);
+ if (!res.isOk()) {
+ return res;
+ }
+
+ if (earlyExit == nullptr) {
+ String8 msg("earlyExit nullptr");
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ *earlyExit = false;
+ auto ret = Camera3Device::mapToStreamConfigurationMode(
+ static_cast<camera3_stream_configuration_mode_t> (operatingMode),
+ /*out*/ &streamConfiguration.operationMode);
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed mapping operating mode %d requested: %s (%d)",
+ logicalCameraId.string(), operatingMode, strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+
+ bool isInputValid = (sessionConfiguration.getInputWidth() > 0) &&
+ (sessionConfiguration.getInputHeight() > 0) &&
+ (sessionConfiguration.getInputFormat() > 0);
+ auto outputConfigs = sessionConfiguration.getOutputConfigurations();
+ size_t streamCount = outputConfigs.size();
+ streamCount = isInputValid ? streamCount + 1 : streamCount;
+ streamConfiguration.streams.resize(streamCount);
+ size_t streamIdx = 0;
+ if (isInputValid) {
+ streamConfiguration.streams[streamIdx++] = {{/*streamId*/0,
+ hardware::camera::device::V3_2::StreamType::INPUT,
+ static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
+ static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
+ Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
+ /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
+ hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
+ /*physicalId*/ nullptr, /*bufferSize*/0};
+ }
+
+ for (const auto &it : outputConfigs) {
+ const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+ it.getGraphicBufferProducers();
+ bool deferredConsumer = it.isDeferred();
+ String8 physicalCameraId = String8(it.getPhysicalCameraId());
+ size_t numBufferProducers = bufferProducers.size();
+ bool isStreamInfoValid = false;
+ OutputStreamInfo streamInfo;
+
+ res = checkSurfaceType(numBufferProducers, deferredConsumer, it.getSurfaceType());
+ if (!res.isOk()) {
+ return res;
+ }
+ res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
+ logicalCameraId);
+ if (!res.isOk()) {
+ return res;
+ }
+
+ if (deferredConsumer) {
+ streamInfo.width = it.getWidth();
+ streamInfo.height = it.getHeight();
+ streamInfo.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ streamInfo.dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
+ auto surfaceType = it.getSurfaceType();
+ streamInfo.consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
+ if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
+ streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
+ }
+ mapStreamInfo(streamInfo, CAMERA3_STREAM_ROTATION_0, physicalCameraId,
+ &streamConfiguration.streams[streamIdx++]);
+ isStreamInfoValid = true;
+
+ if (numBufferProducers == 0) {
+ continue;
+ }
+ }
+
+ for (auto& bufferProducer : bufferProducers) {
+ sp<Surface> surface;
+ const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
+ res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+ logicalCameraId,
+ physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo );
+
+ if (!res.isOk())
+ return res;
+
+ if (!isStreamInfoValid) {
+ bool isDepthCompositeStream =
+ camera3::DepthCompositeStream::isDepthCompositeStream(surface);
+ bool isHeicCompositeStream =
+ camera3::HeicCompositeStream::isHeicCompositeStream(surface);
+ if (isDepthCompositeStream || isHeicCompositeStream) {
+ // We need to take into account that composite streams can have
+ // additional internal camera streams.
+ std::vector<OutputStreamInfo> compositeStreams;
+ if (isDepthCompositeStream) {
+ ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+ deviceInfo, &compositeStreams);
+ } else {
+ ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
+ deviceInfo, &compositeStreams);
+ }
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed adding composite streams: %s (%d)",
+ logicalCameraId.string(), strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (compositeStreams.size() == 0) {
+ // No internal streams means composite stream not
+ // supported.
+ *earlyExit = true;
+ return binder::Status::ok();
+ } else if (compositeStreams.size() > 1) {
+ streamCount += compositeStreams.size() - 1;
+ streamConfiguration.streams.resize(streamCount);
+ }
+
+ for (const auto& compositeStream : compositeStreams) {
+ mapStreamInfo(compositeStream,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ } else {
+ mapStreamInfo(streamInfo,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ isStreamInfoValid = true;
+ }
+ }
+ }
+ return binder::Status::ok();
+
}
}// namespace android
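
The rounding helper moved into SessionConfigurationUtils above picks, for a flexible consumer, the advertised stream size with the smallest squared Euclidean distance from the requested size, skipping entries wider than ROUNDING_WIDTH_CAP. Below is a minimal standalone sketch of that selection rule, with a hypothetical configuration table standing in for the camera static metadata (the StreamConfig struct and the sample sizes are illustrative, not part of the patch):

// Illustrative sketch only: mirrors the nearest-dimension selection used by
// SessionConfigurationUtils::roundBufferDimensionNearest. Real entries come
// from camera static metadata in groups of four (format, width, height, dir).
#include <cstdint>
#include <cstdio>
#include <vector>

struct StreamConfig { int32_t format, width, height; };

static int64_t distSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
    int64_t d0 = x0 - x1, d1 = y0 - y1;
    return d0 * d0 + d1 * d1;
}

// Returns true and fills outW/outH with the closest listed size for 'format',
// considering only widths <= widthCap (1920 in the service code).
static bool roundNearest(const std::vector<StreamConfig>& configs, int32_t format,
                         int32_t w, int32_t h, int32_t widthCap,
                         int32_t* outW, int32_t* outH) {
    int32_t bestW = -1, bestH = -1;
    for (const auto& c : configs) {
        if (c.format != format) continue;
        if (c.width == w && c.height == h) { bestW = w; bestH = h; break; }
        if (c.width <= widthCap &&
            (bestW == -1 || distSquare(c.width, c.height, w, h) <
                            distSquare(bestW, bestH, w, h))) {
            bestW = c.width; bestH = c.height;
        }
    }
    if (bestW == -1) return false;  // no configuration listed for this format
    *outW = bestW;
    *outH = bestH;
    return true;
}

int main() {
    // Hypothetical YUV_420_888 (0x23) sizes advertised by a device.
    std::vector<StreamConfig> configs = {
        {0x23, 1920, 1080}, {0x23, 1280, 720}, {0x23, 640, 480}};
    int32_t w = 0, h = 0;
    if (roundNearest(configs, 0x23, 1300, 700, 1920, &w, &h)) {
        printf("1300x700 rounds to %dx%d\n", w, h);  // prints 1280x720
    }
    return 0;
}
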
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index cfb9f17..6ce2cd7 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -23,6 +23,9 @@
#include <camera/camera2/SubmitInfo.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
+#include <hardware/camera3.h>
+#include <device3/Camera3StreamInterface.h>
+
#include <stdint.h>
namespace android {
@@ -31,6 +34,41 @@
class SessionConfigurationUtils {
public:
+
+ static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+
+ // Find the closest dimensions for a given format in available stream configurations with
+ // a width <= ROUNDING_WIDTH_CAP
+ static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
+ android_dataspace dataSpace, const CameraMetadata& info,
+ /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
+
+ // Check if the format is a public (non-custom) format
+ static bool isPublicFormat(int32_t format);
+
+ // Create a Surface from an IGraphicBufferProducer. Returns error if
+ // IGraphicBufferProducer's property doesn't match with streamInfo
+ static binder::Status createSurfaceFromGbp(
+ camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8 &cameraId, const CameraMetadata &physicalCameraMetadata);
+
+ static void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
+ camera3_stream_rotation_t rotation, String8 physicalId,
+ hardware::camera::device::V3_4::Stream *stream /*out*/);
+
+ // Check that the physicalCameraId passed in is supported by the camera
+ // device.
+ static binder::Status checkPhysicalCameraId(
+ const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+ const String8 &logicalCameraId);
+
+ static binder::Status checkSurfaceType(size_t numBufferProducers,
+ bool deferredConsumer, int surfaceType);
+
+ static binder::Status checkOperatingMode(int operatingMode,
+ const CameraMetadata &staticInfo, const String8 &cameraId);
+
// utility function to convert AIDL SessionConfiguration to HIDL
// streamConfiguration. Also checks for validity of SessionConfiguration and
// returns a non-ok binder::Status if the passed in session configuration
@@ -41,6 +79,10 @@
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
bool *earlyExit);
+
+ static const int32_t MAX_SURFACES_PER_STREAM = 4;
+
+ static const int32_t ROUNDING_WIDTH_CAP = 1920;
};
} // android
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 05bbbc7..dc0773b 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -19,8 +19,6 @@
"libmedia_headers",
],
- init_rc: ["mediaswcodec.rc"],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/services/mediacodec/mediaswcodec.rc b/services/mediacodec/mediaswcodec.rc
deleted file mode 100644
index 3549666..0000000
--- a/services/mediacodec/mediaswcodec.rc
+++ /dev/null
@@ -1,7 +0,0 @@
-service media.swcodec /system/bin/mediaswcodec
- class main
- user mediacodec
- group camera drmrpc mediadrm
- updatable
- ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 0d53c5e..cdf5a4e 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -1,8 +1,53 @@
+filegroup {
+ name: "resourcemanager_aidl",
+ srcs: [
+ "aidl/android/media/IResourceManagerClient.aidl",
+ "aidl/android/media/IResourceManagerService.aidl",
+ "aidl/android/media/MediaResourceType.aidl",
+ "aidl/android/media/MediaResourceSubType.aidl",
+ "aidl/android/media/MediaResourceParcel.aidl",
+ "aidl/android/media/MediaResourcePolicyParcel.aidl",
+ ],
+ path: "aidl",
+}
+
+filegroup {
+ name: "resourceobserver_aidl",
+ srcs: [
+ "aidl/android/media/IResourceObserver.aidl",
+ "aidl/android/media/IResourceObserverService.aidl",
+ "aidl/android/media/MediaObservableEvent.aidl",
+ "aidl/android/media/MediaObservableFilter.aidl",
+ "aidl/android/media/MediaObservableType.aidl",
+ "aidl/android/media/MediaObservableParcel.aidl",
+ ],
+ path: "aidl",
+}
+
+aidl_interface {
+ name: "resourcemanager_aidl_interface",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ ":resourcemanager_aidl",
+ ],
+}
+
+aidl_interface {
+ name: "resourceobserver_aidl_interface",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ ":resourceobserver_aidl",
+ ],
+}
+
cc_library {
name: "libresourcemanagerservice",
srcs: [
"ResourceManagerService.cpp",
+ "ResourceObserverService.cpp",
"ServiceLog.cpp",
],
@@ -15,6 +60,10 @@
"liblog",
],
+ static_libs: [
+ "resourceobserver_aidl_interface-ndk_platform",
+ ],
+
include_dirs: ["frameworks/av/include"],
cflags: [
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 3d36f8e..90a04ac 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -36,18 +36,54 @@
#include <unistd.h>
#include "ResourceManagerService.h"
+#include "ResourceObserverService.h"
#include "ServiceLog.h"
namespace android {
+//static
+std::mutex ResourceManagerService::sCookieLock;
+//static
+uintptr_t ResourceManagerService::sCookieCounter = 0;
+//static
+std::map<uintptr_t, sp<DeathNotifier> > ResourceManagerService::sCookieToDeathNotifierMap;
+
+class DeathNotifier : public RefBase {
+public:
+ DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
+ int pid, int64_t clientId);
+
+ virtual ~DeathNotifier() {}
+
+ // Implement death recipient
+ static void BinderDiedCallback(void* cookie);
+ virtual void binderDied();
+
+protected:
+ std::weak_ptr<ResourceManagerService> mService;
+ int mPid;
+ int64_t mClientId;
+};
+
DeathNotifier::DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
int pid, int64_t clientId)
: mService(service), mPid(pid), mClientId(clientId) {}
//static
void DeathNotifier::BinderDiedCallback(void* cookie) {
- auto thiz = static_cast<DeathNotifier*>(cookie);
- thiz->binderDied();
+ sp<DeathNotifier> notifier;
+ {
+ std::scoped_lock lock{ResourceManagerService::sCookieLock};
+ auto it = ResourceManagerService::sCookieToDeathNotifierMap.find(
+ reinterpret_cast<uintptr_t>(cookie));
+ if (it == ResourceManagerService::sCookieToDeathNotifierMap.end()) {
+ return;
+ }
+ notifier = it->second;
+ }
+ if (notifier.get() != nullptr) {
+ notifier->binderDied();
+ }
}
void DeathNotifier::binderDied() {
@@ -61,7 +97,27 @@
service->overridePid(mPid, -1);
// thiz is freed in the call below, so it must be last call referring thiz
service->removeResource(mPid, mClientId, false);
+}
+class OverrideProcessInfoDeathNotifier : public DeathNotifier {
+public:
+ OverrideProcessInfoDeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
+ int pid) : DeathNotifier(service, pid, 0) {}
+
+ virtual ~OverrideProcessInfoDeathNotifier() {}
+
+ virtual void binderDied();
+};
+
+void OverrideProcessInfoDeathNotifier::binderDied() {
+ // Don't check for pid validity since we know it's already dead.
+ std::shared_ptr<ResourceManagerService> service = mService.lock();
+ if (service == nullptr) {
+ ALOGW("ResourceManagerService is dead as well.");
+ return;
+ }
+
+ service->removeProcessInfoOverride(mPid);
}
template <typename T>
@@ -116,6 +172,7 @@
info.uid = uid;
info.clientId = clientId;
info.client = client;
+ info.cookie = 0;
info.pendingRemoval = false;
index = infos.add(clientId, info);
@@ -267,6 +324,13 @@
if (status != STATUS_OK) {
return;
}
+
+ std::shared_ptr<ResourceObserverService> observerService =
+ ResourceObserverService::instantiate();
+
+ if (observerService != nullptr) {
+ service->setObserverService(observerService);
+ }
// TODO: mediaserver main() is already starting the thread pool,
// move this to mediaserver main() when other services in mediaserver
// are converted to ndk-platform aidl.
@@ -275,6 +339,11 @@
ResourceManagerService::~ResourceManagerService() {}
+void ResourceManagerService::setObserverService(
+ const std::shared_ptr<ResourceObserverService>& observerService) {
+ mObserverService = observerService;
+}
+
Status ResourceManagerService::config(const std::vector<MediaResourcePolicyParcel>& policies) {
String8 log = String8::format("config(%s)", getString(policies).string());
mServiceLog->add(log);
@@ -358,6 +427,7 @@
}
ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
+ ResourceList resourceAdded;
for (size_t i = 0; i < resources.size(); ++i) {
const auto &res = resources[i];
@@ -379,11 +449,20 @@
} else {
mergeResources(info.resources[resType], res);
}
+ // Add it to the list of added resources for observers.
+ auto it = resourceAdded.find(resType);
+ if (it == resourceAdded.end()) {
+ resourceAdded[resType] = res;
+ } else {
+ mergeResources(it->second, res);
+ }
}
- if (info.deathNotifier == nullptr && client != nullptr) {
- info.deathNotifier = new DeathNotifier(ref<ResourceManagerService>(), pid, clientId);
- AIBinder_linkToDeath(client->asBinder().get(),
- mDeathRecipient.get(), info.deathNotifier.get());
+ if (info.cookie == 0 && client != nullptr) {
+ info.cookie = addCookieAndLink_l(client->asBinder(),
+ new DeathNotifier(ref<ResourceManagerService>(), pid, clientId));
+ }
+ if (mObserverService != nullptr && !resourceAdded.empty()) {
+ mObserverService->onResourceAdded(uid, pid, resourceAdded);
}
notifyResourceGranted(pid, resources);
return Status::ok();
@@ -415,7 +494,7 @@
}
ResourceInfo &info = infos.editValueAt(index);
-
+ ResourceList resourceRemoved;
for (size_t i = 0; i < resources.size(); ++i) {
const auto &res = resources[i];
const auto resType = std::tuple(res.type, res.subType, res.id);
@@ -427,14 +506,27 @@
// ignore if we don't have it
if (info.resources.find(resType) != info.resources.end()) {
MediaResourceParcel &resource = info.resources[resType];
+ MediaResourceParcel actualRemoved = res;
if (resource.value > res.value) {
resource.value -= res.value;
} else {
onLastRemoved(res, info);
info.resources.erase(resType);
+ actualRemoved.value = resource.value;
+ }
+
+ // Add it to the list of removed resources for observers.
+ auto it = resourceRemoved.find(resType);
+ if (it == resourceRemoved.end()) {
+ resourceRemoved[resType] = actualRemoved;
+ } else {
+ mergeResources(it->second, actualRemoved);
}
}
}
+ if (mObserverService != nullptr && !resourceRemoved.empty()) {
+ mObserverService->onResourceRemoved(info.uid, pid, resourceRemoved);
+ }
return Status::ok();
}
@@ -472,8 +564,11 @@
onLastRemoved(it->second, info);
}
- AIBinder_unlinkToDeath(info.client->asBinder().get(),
- mDeathRecipient.get(), info.deathNotifier.get());
+ removeCookieAndUnlink_l(info.client->asBinder(), info.cookie);
+
+ if (mObserverService != nullptr && !info.resources.empty()) {
+ mObserverService->onResourceRemoved(info.uid, pid, info.resources);
+ }
infos.removeItemsAt(index);
return Status::ok();
@@ -651,6 +746,83 @@
return Status::ok();
}
+Status ResourceManagerService::overrideProcessInfo(
+ const std::shared_ptr<IResourceManagerClient>& client,
+ int pid,
+ int procState,
+ int oomScore) {
+ String8 log = String8::format("overrideProcessInfo(pid %d, procState %d, oomScore %d)",
+ pid, procState, oomScore);
+ mServiceLog->add(log);
+
+ // Only allow the override if the caller already can access process state and oom scores.
+ int callingPid = AIBinder_getCallingPid();
+ if (callingPid != getpid() && (callingPid != pid || !checkCallingPermission(String16(
+ "android.permission.GET_PROCESS_STATE_AND_OOM_SCORE")))) {
+ ALOGE("Permission Denial: overrideProcessInfo method from pid=%d", callingPid);
+ return Status::fromServiceSpecificError(PERMISSION_DENIED);
+ }
+
+ if (client == nullptr) {
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ Mutex::Autolock lock(mLock);
+ removeProcessInfoOverride_l(pid);
+
+ if (!mProcessInfo->overrideProcessInfo(pid, procState, oomScore)) {
+ // Override value is rejected by ProcessInfo.
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ uintptr_t cookie = addCookieAndLink_l(client->asBinder(),
+ new OverrideProcessInfoDeathNotifier(ref<ResourceManagerService>(), pid));
+
+ mProcessInfoOverrideMap.emplace(pid, ProcessInfoOverride{cookie, client});
+
+ return Status::ok();
+}
+
+uintptr_t ResourceManagerService::addCookieAndLink_l(
+ ::ndk::SpAIBinder binder, const sp<DeathNotifier>& notifier) {
+ std::scoped_lock lock{sCookieLock};
+
+ uintptr_t cookie;
+ // Need to skip cookie 0 (if it wraps around). ResourceInfo has cookie initialized to 0
+ // indicating the death notifier is not created yet.
+ while ((cookie = ++sCookieCounter) == 0);
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), (void*)cookie);
+ sCookieToDeathNotifierMap.emplace(cookie, notifier);
+
+ return cookie;
+}
+
+void ResourceManagerService::removeCookieAndUnlink_l(
+ ::ndk::SpAIBinder binder, uintptr_t cookie) {
+ std::scoped_lock lock{sCookieLock};
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(), (void*)cookie);
+ sCookieToDeathNotifierMap.erase(cookie);
+}
+
+void ResourceManagerService::removeProcessInfoOverride(int pid) {
+ Mutex::Autolock lock(mLock);
+
+ removeProcessInfoOverride_l(pid);
+}
+
+void ResourceManagerService::removeProcessInfoOverride_l(int pid) {
+ auto it = mProcessInfoOverrideMap.find(pid);
+ if (it == mProcessInfoOverrideMap.end()) {
+ return;
+ }
+
+ mProcessInfo->removeProcessInfoOverride(pid);
+
+ removeCookieAndUnlink_l(it->second.client->asBinder(), it->second.cookie);
+
+ mProcessInfoOverrideMap.erase(pid);
+}
+
Status ResourceManagerService::markClientForPendingRemoval(int32_t pid, int64_t clientId) {
String8 log = String8::format(
"markClientForPendingRemoval(pid %d, clientId %lld)",
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 49c247e..1aa1e09 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -19,6 +19,7 @@
#define ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
#include <map>
+#include <mutex>
#include <aidl/android/media/BnResourceManagerService.h>
#include <arpa/inet.h>
@@ -33,6 +34,7 @@
class DeathNotifier;
class ResourceManagerService;
+class ResourceObserverService;
class ServiceLog;
struct ProcessInfoInterface;
@@ -50,7 +52,7 @@
int64_t clientId;
uid_t uid;
std::shared_ptr<IResourceManagerClient> client;
- sp<DeathNotifier> deathNotifier;
+ uintptr_t cookie{0};
ResourceList resources;
bool pendingRemoval{false};
};
@@ -59,22 +61,6 @@
typedef KeyedVector<int64_t, ResourceInfo> ResourceInfos;
typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
-class DeathNotifier : public RefBase {
-public:
- DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
- int pid, int64_t clientId);
-
- ~DeathNotifier() {}
-
- // Implement death recipient
- static void BinderDiedCallback(void* cookie);
- void binderDied();
-
-private:
- std::weak_ptr<ResourceManagerService> mService;
- int mPid;
- int64_t mClientId;
-};
class ResourceManagerService : public BnResourceManagerService {
public:
struct SystemCallbackInterface : public RefBase {
@@ -95,6 +81,8 @@
const sp<ProcessInfoInterface> &processInfo,
const sp<SystemCallbackInterface> &systemResource);
virtual ~ResourceManagerService();
+ void setObserverService(
+ const std::shared_ptr<ResourceObserverService>& observerService);
// IResourceManagerService interface
Status config(const std::vector<MediaResourcePolicyParcel>& policies) override;
@@ -125,12 +113,20 @@
int originalPid,
int newPid) override;
+ Status overrideProcessInfo(
+ const std::shared_ptr<IResourceManagerClient>& client,
+ int pid,
+ int procState,
+ int oomScore) override;
+
Status markClientForPendingRemoval(int32_t pid, int64_t clientId) override;
Status removeResource(int pid, int64_t clientId, bool checkValid);
private:
friend class ResourceManagerServiceTest;
+ friend class DeathNotifier;
+ friend class OverrideProcessInfoDeathNotifier;
// Gets the list of all the clients who own the specified resource type.
// Returns false if any client belongs to a process with higher priority than the
@@ -170,6 +166,12 @@
// Get priority from process's pid
bool getPriority_l(int pid, int* priority);
+ void removeProcessInfoOverride(int pid);
+
+ void removeProcessInfoOverride_l(int pid);
+ uintptr_t addCookieAndLink_l(::ndk::SpAIBinder binder, const sp<DeathNotifier>& notifier);
+ void removeCookieAndUnlink_l(::ndk::SpAIBinder binder, uintptr_t cookie);
+
mutable Mutex mLock;
sp<ProcessInfoInterface> mProcessInfo;
sp<SystemCallbackInterface> mSystemCB;
@@ -179,7 +181,17 @@
bool mSupportsSecureWithNonSecureCodec;
int32_t mCpuBoostCount;
::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+ struct ProcessInfoOverride {
+ uintptr_t cookie;
+ std::shared_ptr<IResourceManagerClient> client;
+ };
std::map<int, int> mOverridePidMap;
+ std::map<pid_t, ProcessInfoOverride> mProcessInfoOverrideMap;
+ static std::mutex sCookieLock;
+ static uintptr_t sCookieCounter GUARDED_BY(sCookieLock);
+ static std::map<uintptr_t, sp<DeathNotifier> > sCookieToDeathNotifierMap
+ GUARDED_BY(sCookieLock);
+ std::shared_ptr<ResourceObserverService> mObserverService;
};
// ----------------------------------------------------------------------------
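
The ResourceManagerService changes above stop passing raw DeathNotifier pointers as binder death cookies; instead an ever-increasing integer cookie keys a mutex-guarded static map, and the death callback resolves the cookie under the lock, so a stale cookie is simply ignored rather than dereferenced. A minimal sketch of that indirection pattern follows, assuming a stand-in Notifier type and a hand-rolled dispatch in place of AIBinder_linkToDeath (both illustrative):

// Illustrative sketch of the cookie-indirection pattern: the death callback
// receives only an integer cookie and resolves it under a lock, so an
// already-unlinked cookie is ignored instead of being dereferenced.
#include <cstdint>
#include <cstdio>
#include <map>
#include <memory>
#include <mutex>

struct Notifier {
    explicit Notifier(int pid) : pid(pid) {}
    void binderDied() { printf("client in pid %d died\n", pid); }
    int pid;
};

static std::mutex sCookieLock;
static uintptr_t sCookieCounter = 0;
static std::map<uintptr_t, std::shared_ptr<Notifier>> sCookieMap;

// Equivalent of addCookieAndLink_l: hand out a non-zero cookie and remember
// the notifier it stands for. Cookie 0 is reserved to mean "not linked yet".
static uintptr_t addCookie(std::shared_ptr<Notifier> notifier) {
    std::scoped_lock lock{sCookieLock};
    uintptr_t cookie;
    while ((cookie = ++sCookieCounter) == 0) {}
    sCookieMap.emplace(cookie, std::move(notifier));
    return cookie;
}

// Equivalent of removeCookieAndUnlink_l.
static void removeCookie(uintptr_t cookie) {
    std::scoped_lock lock{sCookieLock};
    sCookieMap.erase(cookie);
}

// Equivalent of DeathNotifier::BinderDiedCallback: resolve, then notify.
static void onBinderDied(void* cookie) {
    std::shared_ptr<Notifier> notifier;
    {
        std::scoped_lock lock{sCookieLock};
        auto it = sCookieMap.find(reinterpret_cast<uintptr_t>(cookie));
        if (it == sCookieMap.end()) return;  // stale cookie: nothing to do
        notifier = it->second;
    }
    notifier->binderDied();
}

int main() {
    uintptr_t cookie = addCookie(std::make_shared<Notifier>(1234));
    onBinderDied(reinterpret_cast<void*>(cookie));  // resolves and notifies
    removeCookie(cookie);
    onBinderDied(reinterpret_cast<void*>(cookie));  // stale: silently ignored
    return 0;
}
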
diff --git a/services/mediaresourcemanager/ResourceObserverService.cpp b/services/mediaresourcemanager/ResourceObserverService.cpp
new file mode 100644
index 0000000..7c4c875
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceObserverService.cpp
@@ -0,0 +1,312 @@
+/**
+ *
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceObserverService"
+#include <utils/Log.h>
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/IServiceManager.h>
+#include <utils/String16.h>
+#include <aidl/android/media/MediaResourceParcel.h>
+
+#include "ResourceObserverService.h"
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator<(const MediaObservableFilter& lhs, const MediaObservableFilter &rhs) {
+ return lhs.type < rhs.type || (lhs.type == rhs.type && lhs.eventFilter < rhs.eventFilter);
+}
+}}} // namespace ::aidl::android::media
+
+namespace android {
+
+using ::aidl::android::media::MediaResourceParcel;
+using ::aidl::android::media::MediaObservableEvent;
+
+// MediaObservableEvent will be used as uint64_t flags.
+static_assert(sizeof(MediaObservableEvent) == sizeof(uint64_t));
+
+static std::vector<MediaObservableEvent> sEvents = {
+ MediaObservableEvent::kBusy,
+ MediaObservableEvent::kIdle,
+};
+
+static MediaObservableType getObservableType(const MediaResourceParcel& res) {
+ if (res.subType == MediaResourceSubType::kVideoCodec) {
+ if (res.type == MediaResourceType::kNonSecureCodec) {
+ return MediaObservableType::kVideoNonSecureCodec;
+ }
+ if (res.type == MediaResourceType::kSecureCodec) {
+ return MediaObservableType::kVideoSecureCodec;
+ }
+ }
+ return MediaObservableType::kInvalid;
+}
+
+//static
+std::mutex ResourceObserverService::sDeathRecipientLock;
+//static
+std::map<uintptr_t, std::shared_ptr<ResourceObserverService::DeathRecipient> >
+ResourceObserverService::sDeathRecipientMap;
+
+struct ResourceObserverService::DeathRecipient {
+ DeathRecipient(ResourceObserverService* _service,
+ const std::shared_ptr<IResourceObserver>& _observer)
+ : service(_service), observer(_observer) {}
+ ~DeathRecipient() {}
+
+ void binderDied() {
+ if (service != nullptr) {
+ service->unregisterObserver(observer);
+ }
+ }
+
+ ResourceObserverService* service;
+ std::shared_ptr<IResourceObserver> observer;
+};
+
+// static
+void ResourceObserverService::BinderDiedCallback(void* cookie) {
+ uintptr_t id = reinterpret_cast<uintptr_t>(cookie);
+
+ ALOGW("Observer %lld is dead", (long long)id);
+
+ std::shared_ptr<DeathRecipient> recipient;
+
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+
+ auto it = sDeathRecipientMap.find(id);
+ if (it != sDeathRecipientMap.end()) {
+ recipient = it->second;
+ }
+ }
+
+ if (recipient != nullptr) {
+ recipient->binderDied();
+ }
+}
+
+//static
+std::shared_ptr<ResourceObserverService> ResourceObserverService::instantiate() {
+ std::shared_ptr<ResourceObserverService> observerService =
+ ::ndk::SharedRefBase::make<ResourceObserverService>();
+ binder_status_t status = AServiceManager_addService(observerService->asBinder().get(),
+ ResourceObserverService::getServiceName());
+ if (status != STATUS_OK) {
+ return nullptr;
+ }
+ return observerService;
+}
+
+ResourceObserverService::ResourceObserverService()
+ : mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)) {}
+
+binder_status_t ResourceObserverService::dump(
+ int fd, const char** /*args*/, uint32_t /*numArgs*/) {
+ String8 result;
+
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ result.format("Permission Denial: "
+ "can't dump ResourceObserverService from pid=%d, uid=%d\n",
+ AIBinder_getCallingPid(),
+ AIBinder_getCallingUid());
+ write(fd, result.string(), result.size());
+ return PERMISSION_DENIED;
+ }
+
+ result.appendFormat("ResourceObserverService: %p\n", this);
+ result.appendFormat(" Registered Observers: %zu\n", mObserverInfoMap.size());
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ for (auto &observer : mObserverInfoMap) {
+ result.appendFormat(" Observer %p:\n", observer.second.binder.get());
+ for (auto &observable : observer.second.filters) {
+ String8 enabledEventsStr;
+ for (auto &event : sEvents) {
+ if (((uint64_t)observable.eventFilter & (uint64_t)event) != 0) {
+ if (!enabledEventsStr.isEmpty()) {
+ enabledEventsStr.append("|");
+ }
+ enabledEventsStr.append(toString(event).c_str());
+ }
+ }
+ result.appendFormat(" %s: %s\n",
+ toString(observable.type).c_str(), enabledEventsStr.c_str());
+ }
+ }
+ }
+
+ write(fd, result.string(), result.size());
+ return OK;
+}
+
+Status ResourceObserverService::registerObserver(
+ const std::shared_ptr<IResourceObserver>& in_observer,
+ const std::vector<MediaObservableFilter>& in_filters) {
+ // TODO(chz): Guard this by a permission.
+
+ ::ndk::SpAIBinder binder = in_observer->asBinder();
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ if (mObserverInfoMap.find((uintptr_t)binder.get()) != mObserverInfoMap.end()) {
+ return Status::fromServiceSpecificError(ALREADY_EXISTS);
+ }
+
+ if (in_filters.empty()) {
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ // Add observer info.
+ mObserverInfoMap.emplace((uintptr_t)binder.get(),
+ ObserverInfo{binder, in_observer, in_filters});
+
+ // Add observer to observable->subscribers map.
+ for (auto &filter : in_filters) {
+ for (auto &event : sEvents) {
+ if (!((uint64_t)filter.eventFilter & (uint64_t)event)) {
+ continue;
+ }
+ MediaObservableFilter key{filter.type, event};
+ mObservableToSubscribersMap[key].emplace((uintptr_t)binder.get(), in_observer);
+ }
+ }
+ }
+
+ // Add death binder and link.
+ uintptr_t cookie = (uintptr_t)binder.get();
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+ sDeathRecipientMap.emplace(
+ cookie, std::make_shared<DeathRecipient>(this, in_observer));
+ }
+
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(),
+ reinterpret_cast<void*>(cookie));
+
+ return Status::ok();
+}
+
+Status ResourceObserverService::unregisterObserver(
+ const std::shared_ptr<IResourceObserver>& in_observer) {
+ // TODO(chz): Guard this by a permission.
+
+ ::ndk::SpAIBinder binder = in_observer->asBinder();
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ auto it = mObserverInfoMap.find((uintptr_t)binder.get());
+ if (it == mObserverInfoMap.end()) {
+ return Status::fromServiceSpecificError(NAME_NOT_FOUND);
+ }
+
+ // Remove observer from observable->subscribers map.
+ for (auto &filter : it->second.filters) {
+ for (auto &event : sEvents) {
+ if (!((uint64_t)filter.eventFilter & (uint64_t)event)) {
+ continue;
+ }
+ MediaObservableFilter key{filter.type, event};
+ mObservableToSubscribersMap[key].erase((uintptr_t)binder.get());
+
+ // Remove the entry if there are no more subscribers.
+ if (mObservableToSubscribersMap[key].empty()) {
+ mObservableToSubscribersMap.erase(key);
+ }
+ }
+ }
+
+ // Remove observer info.
+ mObserverInfoMap.erase(it);
+ }
+
+ // Unlink and remove death binder.
+ uintptr_t cookie = (uintptr_t)binder.get();
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(),
+ reinterpret_cast<void*>(cookie));
+
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+ sDeathRecipientMap.erase(cookie);
+ }
+
+ return Status::ok();
+}
+
+void ResourceObserverService::notifyObservers(
+ MediaObservableEvent event, int uid, int pid, const ResourceList &resources) {
+ struct CalleeInfo {
+ std::shared_ptr<IResourceObserver> observer;
+ std::vector<MediaObservableParcel> monitors;
+ };
+ // Build a consolidated list of observers to call with their respective observables.
+ std::map<uintptr_t, CalleeInfo> calleeList;
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ for (auto &res : resources) {
+ // Skip if this resource doesn't map to any observable type.
+ MediaObservableType observableType = getObservableType(res.second);
+ if (observableType == MediaObservableType::kInvalid) {
+ continue;
+ }
+ MediaObservableFilter key{observableType, event};
+ // Skip if no one subscribed to this observable.
+ auto observableIt = mObservableToSubscribersMap.find(key);
+ if (observableIt == mObservableToSubscribersMap.end()) {
+ continue;
+ }
+ // Loop through all subscribers.
+ for (auto &subscriber : observableIt->second) {
+ auto calleeIt = calleeList.find(subscriber.first);
+ if (calleeIt == calleeList.end()) {
+ calleeList.emplace(subscriber.first, CalleeInfo{
+ subscriber.second, {{observableType, res.second.value}}});
+ } else {
+ calleeIt->second.monitors.push_back({observableType, res.second.value});
+ }
+ }
+ }
+ }
+
+ // Finally call the observers about the status change.
+ for (auto &calleeInfo : calleeList) {
+ calleeInfo.second.observer->onStatusChanged(
+ event, uid, pid, calleeInfo.second.monitors);
+ }
+}
+
+void ResourceObserverService::onResourceAdded(
+ int uid, int pid, const ResourceList &resources) {
+ notifyObservers(MediaObservableEvent::kBusy, uid, pid, resources);
+}
+
+void ResourceObserverService::onResourceRemoved(
+ int uid, int pid, const ResourceList &resources) {
+ notifyObservers(MediaObservableEvent::kIdle, uid, pid, resources);
+}
+
+} // namespace android
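
The notifyObservers() flow above reduces each notification to two lookups: a resource maps to an observable type, and the <observable type, event> pair maps to the set of subscribed observers, consolidated into a single callback per observer. The standalone sketch below (plain C++/STL with simplified types and no binder plumbing; an illustration rather than the service code) models that consolidation step:

    // Standalone model of the observable->subscribers lookup performed by
    // ResourceObserverService::notifyObservers(). Types are simplified and the
    // AIDL/binder plumbing is omitted.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <tuple>
    #include <vector>

    enum class Type { kVideoSecureCodec, kVideoNonSecureCodec };
    enum class Event : uint64_t { kBusy = 1, kIdle = 2 };

    struct Filter {
        Type type;
        Event event;
        // std::map needs a strict weak ordering on its key.
        bool operator<(const Filter& other) const {
            return std::tie(type, event) < std::tie(other.type, other.event);
        }
    };

    int main() {
        // Observable(<type,event>) -> subscribers, keyed by an opaque observer id.
        std::map<Filter, std::map<int, std::string>> subscribers;
        subscribers[{Type::kVideoSecureCodec, Event::kBusy}].emplace(1, "observer1");
        subscribers[{Type::kVideoSecureCodec, Event::kBusy}].emplace(3, "observer3");

        // A "secure video codec became busy" notification is consolidated per observer.
        std::map<int, std::vector<Type>> calleeList;
        auto it = subscribers.find({Type::kVideoSecureCodec, Event::kBusy});
        if (it != subscribers.end()) {
            for (const auto& [id, name] : it->second) {
                calleeList[id].push_back(Type::kVideoSecureCodec);
            }
        }
        for (const auto& [id, observables] : calleeList) {
            std::cout << "notify observer " << id << " with "
                      << observables.size() << " observable(s)" << std::endl;
        }
        return 0;
    }
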
diff --git a/services/mediaresourcemanager/ResourceObserverService.h b/services/mediaresourcemanager/ResourceObserverService.h
new file mode 100644
index 0000000..46bc5fb
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceObserverService.h
@@ -0,0 +1,95 @@
+/**
+ *
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
+#define ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
+
+#include <map>
+
+#include <aidl/android/media/BnResourceObserverService.h>
+#include "ResourceManagerService.h"
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceObserverService;
+using ::aidl::android::media::IResourceObserver;
+using ::aidl::android::media::MediaObservableFilter;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+using ::aidl::android::media::MediaObservableEvent;
+
+class ResourceObserverService : public BnResourceObserverService {
+public:
+
+ static char const *getServiceName() { return "media.resource_observer"; }
+ static std::shared_ptr<ResourceObserverService> instantiate();
+
+ virtual inline binder_status_t dump(
+ int /*fd*/, const char** /*args*/, uint32_t /*numArgs*/);
+
+ ResourceObserverService();
+ virtual ~ResourceObserverService() {}
+
+ // IResourceObserverService interface
+ Status registerObserver(const std::shared_ptr<IResourceObserver>& in_observer,
+ const std::vector<MediaObservableFilter>& in_filters) override;
+
+ Status unregisterObserver(const std::shared_ptr<IResourceObserver>& in_observer) override;
+ // ~IResourceObserverService interface
+
+ // Called by ResourceManagerService when resources are added.
+ void onResourceAdded(int uid, int pid, const ResourceList &resources);
+
+ // Called by ResourceManagerService when resources are removed.
+ void onResourceRemoved(int uid, int pid, const ResourceList &resources);
+
+private:
+ struct ObserverInfo {
+ ::ndk::SpAIBinder binder;
+ std::shared_ptr<IResourceObserver> observer;
+ std::vector<MediaObservableFilter> filters;
+ };
+ struct DeathRecipient;
+
+ // The maps below are all keyed on the observer's binder pointer value.
+ using ObserverInfoMap = std::map<uintptr_t, ObserverInfo>;
+ using SubscriberMap = std::map<uintptr_t, std::shared_ptr<IResourceObserver>>;
+
+ std::mutex mObserverLock;
+ // Binder->ObserverInfo
+ ObserverInfoMap mObserverInfoMap GUARDED_BY(mObserverLock);
+ // Observable(<type,event>)->Subscribers
+ std::map<MediaObservableFilter, SubscriberMap> mObservableToSubscribersMap
+ GUARDED_BY(mObserverLock);
+
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+
+ // Binder death handling.
+ static std::mutex sDeathRecipientLock;
+ static std::map<uintptr_t, std::shared_ptr<DeathRecipient>> sDeathRecipientMap
+ GUARDED_BY(sDeathRecipientLock);
+ static void BinderDiedCallback(void* cookie);
+
+ void notifyObservers(MediaObservableEvent event,
+ int uid, int pid, const ResourceList &resources);
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
diff --git a/services/mediaresourcemanager/TEST_MAPPING b/services/mediaresourcemanager/TEST_MAPPING
index 418b159..52ad441 100644
--- a/services/mediaresourcemanager/TEST_MAPPING
+++ b/services/mediaresourcemanager/TEST_MAPPING
@@ -5,6 +5,9 @@
},
{
"name": "ServiceLog_test"
+ },
+ {
+ "name": "ResourceObserverService_test"
}
]
}
diff --git a/media/libmedia/aidl/android/media/IResourceManagerClient.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceManagerClient.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/IResourceManagerClient.aidl
rename to services/mediaresourcemanager/aidl/android/media/IResourceManagerClient.aidl
diff --git a/media/libmedia/aidl/android/media/IResourceManagerService.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
similarity index 78%
rename from media/libmedia/aidl/android/media/IResourceManagerService.aidl
rename to services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
index 1b2d522..5cf8686 100644
--- a/media/libmedia/aidl/android/media/IResourceManagerService.aidl
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
@@ -96,6 +96,28 @@
void overridePid(int originalPid, int newPid);
/**
+ * Override the process state and OOM score of the calling process with the
+ * specified values. This is used by native service processes to specify
+ * these values for ResourceManagerService to use. ResourceManagerService usually
+ * gets these values from ActivityManagerService; however, ActivityManagerService
+ * doesn't track native service processes.
+ *
+ * @param client a token for the ResourceManagerService to link to the caller and
+ * receive notification if it goes away. This is needed for clearing
+ * the overrides.
+ * @param pid pid of the calling process.
+ * @param procState the process state value that ResourceManagerService should
+ * use for this pid.
+ * @param oomScore the oom score value that ResourceManagerService should
+ * use for this pid.
+ */
+ void overrideProcessInfo(
+ IResourceManagerClient client,
+ int pid,
+ int procState,
+ int oomScore);
+
+ /**
* Mark a client for pending removal
*
* @param pid pid from which the client's resources will be removed.
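
The overrideProcessInfo() method documented above exists for native service processes, whose process state and OOM score ActivityManagerService does not track. The hedged sketch below shows what a caller might look like; the generated NDK header paths, the "media.resource" service name, and the procState/oomScore values are illustrative assumptions rather than details taken from this patch:

    // Hypothetical native caller registering a process-info override.
    #include <aidl/android/media/BnResourceManagerClient.h>   // generated (assumed path)
    #include <aidl/android/media/IResourceManagerService.h>   // generated (assumed path)
    #include <android/binder_manager.h>
    #include <string>
    #include <unistd.h>

    using ::aidl::android::media::BnResourceManagerClient;
    using ::aidl::android::media::IResourceManagerService;

    // Minimal client token so ResourceManagerService can be notified of the
    // caller's death and clear the override.
    struct OverrideToken : public BnResourceManagerClient {
        ::ndk::ScopedAStatus reclaimResource(bool* _aidl_return) override {
            *_aidl_return = false;
            return ::ndk::ScopedAStatus::ok();
        }
        ::ndk::ScopedAStatus getName(std::string* _aidl_return) override {
            *_aidl_return = "override_token";
            return ::ndk::ScopedAStatus::ok();
        }
    };

    static void registerProcessInfoOverride() {
        ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource"));  // assumed name
        auto service = IResourceManagerService::fromBinder(binder);
        if (service == nullptr) {
            return;
        }
        // In real code the token must be kept alive for as long as the override should last.
        auto token = ::ndk::SharedRefBase::make<OverrideToken>();
        // procState / oomScore below are placeholders, not values mandated by the API.
        service->overrideProcessInfo(token, getpid(), /*procState=*/2, /*oomScore=*/-700);
    }
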
diff --git a/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl
new file mode 100644
index 0000000..462009a
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl
@@ -0,0 +1,39 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableEvent;
+import android.media.MediaObservableParcel;
+
+/**
+ * IResourceObserver interface for receiving observable resource updates
+ * from IResourceObserverService.
+ *
+ * {@hide}
+ */
+interface IResourceObserver {
+ /**
+ * Called when an observed resource is granted to a client.
+ *
+ * @param event the status change that happened to the resource.
+ * @param uid uid to which the resource is associated.
+ * @param pid pid to which the resource is associated.
+ * @param observables the resources whose status has changed.
+ */
+ oneway void onStatusChanged(MediaObservableEvent event,
+ int uid, int pid, in MediaObservableParcel[] observables);
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl
new file mode 100644
index 0000000..08f4ca0
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl
@@ -0,0 +1,49 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.IResourceObserver;
+import android.media.MediaObservableFilter;
+
+/**
+ * IResourceObserverService interface for registering an IResourceObserver
+ * callback to receive status updates about observable media resources.
+ *
+ * {@hide}
+ */
+interface IResourceObserverService {
+
+ /**
+ * Register an observer on the IResourceObserverService to receive
+ * status updates for observable resources.
+ *
+ * @param observer the observer to register.
+ * @param filters an array of filters for resources and events to receive
+ * updates for.
+ */
+ void registerObserver(
+ IResourceObserver observer,
+ in MediaObservableFilter[] filters);
+
+ /**
+ * Unregister an observer from the IResourceObserverService.
+ * The observer will stop receiving the status updates.
+ *
+ * @param observer the observer to unregister.
+ */
+ void unregisterObserver(IResourceObserver observer);
+}
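
Registering an observer through this interface takes only a few lines of NDK client code. The hedged sketch below shows the flow; the generated header paths are assumptions, while the "media.resource_observer" service name comes from ResourceObserverService::getServiceName() earlier in this patch:

    // Hypothetical client subscribing to secure-video-codec status updates.
    #include <aidl/android/media/BnResourceObserver.h>          // generated (assumed path)
    #include <aidl/android/media/IResourceObserverService.h>    // generated (assumed path)
    #include <android/binder_manager.h>
    #include <vector>

    using namespace ::aidl::android::media;

    struct MyObserver : public BnResourceObserver {
        ::ndk::ScopedAStatus onStatusChanged(MediaObservableEvent event, int32_t uid, int32_t pid,
                const std::vector<MediaObservableParcel>& observables) override {
            // React to the change here, e.g. defer creating a codec while kBusy.
            (void)event; (void)uid; (void)pid; (void)observables;
            return ::ndk::ScopedAStatus::ok();
        }
    };

    static void subscribe() {
        ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource_observer"));
        auto service = IResourceObserverService::fromBinder(binder);
        if (service == nullptr) {
            return;
        }
        auto observer = ::ndk::SharedRefBase::make<MyObserver>();
        std::vector<MediaObservableFilter> filters =
                {{MediaObservableType::kVideoSecureCodec, MediaObservableEvent::kAll}};
        service->registerObserver(observer, filters);
        // ... later, when updates are no longer needed:
        service->unregisterObserver(observer);
    }
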
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl
new file mode 100644
index 0000000..56ab24d
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Enums for media observable events.
+ *
+ * These values are used as bitmasks to indicate the events that the
+ * observer is interested in, via the MediaObservableFilter objects passed
+ * to IResourceObserverService::registerObserver().
+ *
+ * {@hide}
+ */
+@Backing(type="long")
+enum MediaObservableEvent {
+ /**
+ * A media resource is granted to a client and becomes busy.
+ */
+ kBusy = 1,
+
+ /**
+ * A media resource is released by a client and becomes idle.
+ */
+ kIdle = 2,
+
+ /**
+ * A bitmask that covers all observable events defined.
+ */
+ kAll = ~0,
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl
new file mode 100644
index 0000000..38f7e39
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl
@@ -0,0 +1,43 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableType;
+import android.media.MediaObservableEvent;
+
+/**
+ * Description of an observable resource and its associated events that the
+ * observer is interested in.
+ *
+ * {@hide}
+ */
+parcelable MediaObservableFilter {
+ /**
+ * Type of the observable media resource.
+ */
+ MediaObservableType type;
+
+ /**
+ * Events that the observer is interested in.
+ *
+ * This field is a bitwise-OR of the events in MediaObservableEvent. If a
+ * particular event's bit is set, it means that updates should be sent for
+ * that event. For example, if the observer is only interested in receiving
+ * updates when a resource becomes available, it should only set 'kIdle'.
+ */
+ MediaObservableEvent eventFilter;
+}
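
Since eventFilter is a bitwise-OR of MediaObservableEvent values backed by long, combining events on the C++ side requires explicit casts, mirroring the masking ResourceObserverService performs when matching filters against events. A small hedged sketch (generated header path assumed):

    // Hypothetical helper building a filter that matches both kBusy and kIdle
    // (equivalent to kAll for the events currently defined).
    #include <cstdint>
    #include <aidl/android/media/MediaObservableFilter.h>   // generated (assumed path)

    using ::aidl::android::media::MediaObservableEvent;
    using ::aidl::android::media::MediaObservableFilter;
    using ::aidl::android::media::MediaObservableType;

    static MediaObservableFilter makeBusyIdleFilter() {
        auto mask = static_cast<MediaObservableEvent>(
                static_cast<uint64_t>(MediaObservableEvent::kBusy) |
                static_cast<uint64_t>(MediaObservableEvent::kIdle));
        return MediaObservableFilter{MediaObservableType::kVideoNonSecureCodec, mask};
    }
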
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl
new file mode 100644
index 0000000..c4233e1
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableType;
+
+/**
+ * Description of an observable resource whose status has changed.
+ *
+ * {@hide}
+ */
+parcelable MediaObservableParcel {
+ /**
+ * Type of the observable media resource.
+ */
+ MediaObservableType type; // = MediaObservableType::kInvalid;
+
+ /**
+ * Number of units of the observable resource (number of codecs, bytes of
+ * graphic memory, etc.).
+ */
+ long value = 0;
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl
new file mode 100644
index 0000000..ed202da
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl
@@ -0,0 +1,35 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Type enums of observable media resources.
+ *
+ * {@hide}
+ */
+@Backing(type="int")
+enum MediaObservableType {
+ kInvalid = 0,
+
+ //kVideoStart = 1000,
+ kVideoSecureCodec = 1000,
+ kVideoNonSecureCodec = 1001,
+
+ //kAudioStart = 2000,
+
+ //kGraphicMemory = 3000,
+}
diff --git a/media/libmedia/aidl/android/media/MediaResourceParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceParcel.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceParcel.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceParcel.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourcePolicyParcel.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourcePolicyParcel.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourceSubType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceSubType.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceSubType.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceSubType.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourceType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceType.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceType.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceType.aidl
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index 6b2ef69..308ee91 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -40,3 +40,28 @@
"-Wall",
],
}
+
+cc_test {
+ name: "ResourceObserverService_test",
+ srcs: ["ResourceObserverService_test.cpp"],
+ test_suites: ["device-tests"],
+ static_libs: [
+ "libresourcemanagerservice",
+ "resourceobserver_aidl_interface-ndk_platform",
+ ],
+ shared_libs: [
+ "libbinder",
+ "libbinder_ndk",
+ "liblog",
+ "libmedia",
+ "libutils",
+ ],
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
new file mode 100644
index 0000000..4cf5f0a
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ResourceManagerService.h"
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator== (const MediaResourceParcel& lhs, const MediaResourceParcel& rhs) {
+ return lhs.type == rhs.type && lhs.subType == rhs.subType &&
+ lhs.id == rhs.id && lhs.value == rhs.value;
+}
+}}}
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceManagerClient;
+using ::aidl::android::media::IResourceManagerService;
+using ::aidl::android::media::IResourceManagerClient;
+using ::aidl::android::media::MediaResourceParcel;
+
+static int64_t getId(const std::shared_ptr<IResourceManagerClient>& client) {
+ return (int64_t) client.get();
+}
+
+struct TestProcessInfo : public ProcessInfoInterface {
+ TestProcessInfo() {}
+ virtual ~TestProcessInfo() {}
+
+ virtual bool getPriority(int pid, int *priority) {
+ // For testing, use pid as priority.
+ // The lower the value, the higher the priority.
+ *priority = pid;
+ return true;
+ }
+
+ virtual bool isValidPid(int /* pid */) {
+ return true;
+ }
+
+ virtual bool overrideProcessInfo(
+ int /* pid */, int /* procState */, int /* oomScore */) {
+ return true;
+ }
+
+ virtual void removeProcessInfoOverride(int /* pid */) {
+ }
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
+};
+
+struct TestSystemCallback :
+ public ResourceManagerService::SystemCallbackInterface {
+ TestSystemCallback() :
+ mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
+
+ enum EventType {
+ INVALID = -1,
+ VIDEO_ON = 0,
+ VIDEO_OFF = 1,
+ VIDEO_RESET = 2,
+ CPUSET_ENABLE = 3,
+ CPUSET_DISABLE = 4,
+ };
+
+ struct EventEntry {
+ EventType type;
+ int arg;
+ };
+
+ virtual void noteStartVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_ON, uid};
+ mEventCount++;
+ }
+
+ virtual void noteStopVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_OFF, uid};
+ mEventCount++;
+ }
+
+ virtual void noteResetVideo() override {
+ mLastEvent = {EventType::VIDEO_RESET, 0};
+ mEventCount++;
+ }
+
+ virtual bool requestCpusetBoost(bool enable) override {
+ mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
+ mEventCount++;
+ return true;
+ }
+
+ size_t eventCount() { return mEventCount; }
+ EventType lastEventType() { return mLastEvent.type; }
+ EventEntry lastEvent() { return mLastEvent; }
+
+protected:
+ virtual ~TestSystemCallback() {}
+
+private:
+ EventEntry mLastEvent;
+ size_t mEventCount;
+
+ DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
+};
+
+
+struct TestClient : public BnResourceManagerClient {
+ TestClient(int pid, const std::shared_ptr<ResourceManagerService> &service)
+ : mReclaimed(false), mPid(pid), mService(service) {}
+
+ Status reclaimResource(bool* _aidl_return) override {
+ mService->removeClient(mPid, getId(ref<TestClient>()));
+ mReclaimed = true;
+ *_aidl_return = true;
+ return Status::ok();
+ }
+
+ Status getName(::std::string* _aidl_return) override {
+ *_aidl_return = "test_client";
+ return Status::ok();
+ }
+
+ bool reclaimed() const {
+ return mReclaimed;
+ }
+
+ void reset() {
+ mReclaimed = false;
+ }
+
+ virtual ~TestClient() {}
+
+private:
+ bool mReclaimed;
+ int mPid;
+ std::shared_ptr<ResourceManagerService> mService;
+ DISALLOW_EVIL_CONSTRUCTORS(TestClient);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestUid1 = 1010;
+
+static const int kTestPid2 = 20;
+static const int kTestUid2 = 1011;
+
+static const int kLowPriorityPid = 40;
+static const int kMidPriorityPid = 25;
+static const int kHighPriorityPid = 10;
+
+using EventType = TestSystemCallback::EventType;
+using EventEntry = TestSystemCallback::EventEntry;
+bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
+ return lhs.type == rhs.type && lhs.arg == rhs.arg;
+}
+
+#define CHECK_STATUS_TRUE(condition) \
+ EXPECT_TRUE((condition).isOk() && (result))
+
+#define CHECK_STATUS_FALSE(condition) \
+ EXPECT_TRUE((condition).isOk() && !(result))
+
+class ResourceManagerServiceTestBase : public ::testing::Test {
+public:
+ ResourceManagerServiceTestBase()
+ : mSystemCB(new TestSystemCallback()),
+ mService(::ndk::SharedRefBase::make<ResourceManagerService>(
+ new TestProcessInfo, mSystemCB)),
+ mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, mService)),
+ mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)),
+ mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)) {
+ }
+
+ sp<TestSystemCallback> mSystemCB;
+ std::shared_ptr<ResourceManagerService> mService;
+ std::shared_ptr<IResourceManagerClient> mTestClient1;
+ std::shared_ptr<IResourceManagerClient> mTestClient2;
+ std::shared_ptr<IResourceManagerClient> mTestClient3;
+
+protected:
+ static bool isEqualResources(const std::vector<MediaResourceParcel> &resources1,
+ const ResourceList &resources2) {
+ // Convert resources1 to a ResourceList.
+ ResourceList r1;
+ for (size_t i = 0; i < resources1.size(); ++i) {
+ const auto &res = resources1[i];
+ const auto resType = std::tuple(res.type, res.subType, res.id);
+ r1[resType] = res;
+ }
+ return r1 == resources2;
+ }
+
+ static void expectEqResourceInfo(const ResourceInfo &info,
+ int uid,
+ std::shared_ptr<IResourceManagerClient> client,
+ const std::vector<MediaResourceParcel> &resources) {
+ EXPECT_EQ(uid, info.uid);
+ EXPECT_EQ(client, info.client);
+ EXPECT_TRUE(isEqualResources(resources, info.resources));
+ }
+};
+
+} // namespace android
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index 702935d..15601aa 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -16,197 +16,17 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ResourceManagerService_test"
+
#include <utils/Log.h>
-#include <gtest/gtest.h>
-
+#include "ResourceManagerServiceTestUtils.h"
#include "ResourceManagerService.h"
-#include <aidl/android/media/BnResourceManagerClient.h>
-#include <media/MediaResource.h>
-#include <media/MediaResourcePolicy.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/ProcessInfoInterface.h>
-
-namespace aidl {
-namespace android {
-namespace media {
-bool operator== (const MediaResourceParcel& lhs, const MediaResourceParcel& rhs) {
- return lhs.type == rhs.type && lhs.subType == rhs.subType &&
- lhs.id == rhs.id && lhs.value == rhs.value;
-}}}}
namespace android {
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::BnResourceManagerClient;
-using ::aidl::android::media::IResourceManagerService;
-using ::aidl::android::media::IResourceManagerClient;
-
-static int64_t getId(const std::shared_ptr<IResourceManagerClient>& client) {
- return (int64_t) client.get();
-}
-
-struct TestProcessInfo : public ProcessInfoInterface {
- TestProcessInfo() {}
- virtual ~TestProcessInfo() {}
-
- virtual bool getPriority(int pid, int *priority) {
- // For testing, use pid as priority.
- // Lower the value higher the priority.
- *priority = pid;
- return true;
- }
-
- virtual bool isValidPid(int /* pid */) {
- return true;
- }
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
-};
-
-struct TestSystemCallback :
- public ResourceManagerService::SystemCallbackInterface {
- TestSystemCallback() :
- mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
-
- enum EventType {
- INVALID = -1,
- VIDEO_ON = 0,
- VIDEO_OFF = 1,
- VIDEO_RESET = 2,
- CPUSET_ENABLE = 3,
- CPUSET_DISABLE = 4,
- };
-
- struct EventEntry {
- EventType type;
- int arg;
- };
-
- virtual void noteStartVideo(int uid) override {
- mLastEvent = {EventType::VIDEO_ON, uid};
- mEventCount++;
- }
-
- virtual void noteStopVideo(int uid) override {
- mLastEvent = {EventType::VIDEO_OFF, uid};
- mEventCount++;
- }
-
- virtual void noteResetVideo() override {
- mLastEvent = {EventType::VIDEO_RESET, 0};
- mEventCount++;
- }
-
- virtual bool requestCpusetBoost(bool enable) override {
- mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
- mEventCount++;
- return true;
- }
-
- size_t eventCount() { return mEventCount; }
- EventType lastEventType() { return mLastEvent.type; }
- EventEntry lastEvent() { return mLastEvent; }
-
-protected:
- virtual ~TestSystemCallback() {}
-
-private:
- EventEntry mLastEvent;
- size_t mEventCount;
-
- DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
-};
-
-
-struct TestClient : public BnResourceManagerClient {
- TestClient(int pid, const std::shared_ptr<ResourceManagerService> &service)
- : mReclaimed(false), mPid(pid), mService(service) {}
-
- Status reclaimResource(bool* _aidl_return) override {
- mService->removeClient(mPid, getId(ref<TestClient>()));
- mReclaimed = true;
- *_aidl_return = true;
- return Status::ok();
- }
-
- Status getName(::std::string* _aidl_return) override {
- *_aidl_return = "test_client";
- return Status::ok();
- }
-
- bool reclaimed() const {
- return mReclaimed;
- }
-
- void reset() {
- mReclaimed = false;
- }
-
- virtual ~TestClient() {}
-
-private:
- bool mReclaimed;
- int mPid;
- std::shared_ptr<ResourceManagerService> mService;
- DISALLOW_EVIL_CONSTRUCTORS(TestClient);
-};
-
-static const int kTestPid1 = 30;
-static const int kTestUid1 = 1010;
-
-static const int kTestPid2 = 20;
-static const int kTestUid2 = 1011;
-
-static const int kLowPriorityPid = 40;
-static const int kMidPriorityPid = 25;
-static const int kHighPriorityPid = 10;
-
-using EventType = TestSystemCallback::EventType;
-using EventEntry = TestSystemCallback::EventEntry;
-bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
- return lhs.type == rhs.type && lhs.arg == rhs.arg;
-}
-
-#define CHECK_STATUS_TRUE(condition) \
- EXPECT_TRUE((condition).isOk() && (result))
-
-#define CHECK_STATUS_FALSE(condition) \
- EXPECT_TRUE((condition).isOk() && !(result))
-
-class ResourceManagerServiceTest : public ::testing::Test {
+class ResourceManagerServiceTest : public ResourceManagerServiceTestBase {
public:
- ResourceManagerServiceTest()
- : mSystemCB(new TestSystemCallback()),
- mService(::ndk::SharedRefBase::make<ResourceManagerService>(
- new TestProcessInfo, mSystemCB)),
- mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, mService)),
- mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)),
- mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)) {
- }
-
-protected:
- static bool isEqualResources(const std::vector<MediaResourceParcel> &resources1,
- const ResourceList &resources2) {
- // convert resource1 to ResourceList
- ResourceList r1;
- for (size_t i = 0; i < resources1.size(); ++i) {
- const auto &res = resources1[i];
- const auto resType = std::tuple(res.type, res.subType, res.id);
- r1[resType] = res;
- }
- return r1 == resources2;
- }
-
- static void expectEqResourceInfo(const ResourceInfo &info,
- int uid,
- std::shared_ptr<IResourceManagerClient> client,
- const std::vector<MediaResourceParcel> &resources) {
- EXPECT_EQ(uid, info.uid);
- EXPECT_EQ(client, info.client);
- EXPECT_TRUE(isEqualResources(resources, info.resources));
- }
+ ResourceManagerServiceTest() : ResourceManagerServiceTestBase() {}
void verifyClients(bool c1, bool c2, bool c3) {
TestClient *client1 = static_cast<TestClient*>(mTestClient1.get());
@@ -881,12 +701,6 @@
EXPECT_EQ(4u, mSystemCB->eventCount());
EXPECT_EQ(EventType::CPUSET_DISABLE, mSystemCB->lastEventType());
}
-
- sp<TestSystemCallback> mSystemCB;
- std::shared_ptr<ResourceManagerService> mService;
- std::shared_ptr<IResourceManagerClient> mTestClient1;
- std::shared_ptr<IResourceManagerClient> mTestClient2;
- std::shared_ptr<IResourceManagerClient> mTestClient3;
};
TEST_F(ResourceManagerServiceTest, config) {
diff --git a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
new file mode 100644
index 0000000..4c26246
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceObserverService_test"
+
+#include <iostream>
+#include <list>
+
+#include <aidl/android/media/BnResourceObserver.h>
+#include <utils/Log.h>
+#include "ResourceObserverService.h"
+#include "ResourceManagerServiceTestUtils.h"
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator==(const MediaObservableParcel& lhs, const MediaObservableParcel& rhs) {
+ return lhs.type == rhs.type && lhs.value == rhs.value;
+}
+}}} // namespace ::aidl::android::media
+
+namespace android {
+
+using ::aidl::android::media::BnResourceObserver;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+
+#define BUSY ::aidl::android::media::MediaObservableEvent::kBusy
+#define IDLE ::aidl::android::media::MediaObservableEvent::kIdle
+#define ALL ::aidl::android::media::MediaObservableEvent::kAll
+
+struct EventTracker {
+ struct Event {
+ enum { NoEvent, Busy, Idle } type = NoEvent;
+ int uid = 0;
+ int pid = 0;
+ std::vector<MediaObservableParcel> observables;
+ };
+
+ static const Event NoEvent;
+
+ static std::string toString(const MediaObservableParcel& observable) {
+ return "{" + ::aidl::android::media::toString(observable.type)
+ + ", " + std::to_string(observable.value) + "}";
+ }
+ static std::string toString(const Event& event) {
+ std::string eventStr;
+ switch (event.type) {
+ case Event::Busy:
+ eventStr = "Busy";
+ break;
+ case Event::Idle:
+ eventStr = "Idle";
+ break;
+ default:
+ return "NoEvent";
+ }
+ std::string observableStr;
+ for (auto &observable : event.observables) {
+ if (!observableStr.empty()) {
+ observableStr += ", ";
+ }
+ observableStr += toString(observable);
+ }
+ return "{" + eventStr + ", " + std::to_string(event.uid) + ", "
+ + std::to_string(event.pid) + ", {" + observableStr + "}}";
+ }
+
+ static Event Busy(int uid, int pid, const std::vector<MediaObservableParcel>& observables) {
+ return { Event::Busy, uid, pid, observables };
+ }
+ static Event Idle(int uid, int pid, const std::vector<MediaObservableParcel>& observables) {
+ return { Event::Idle, uid, pid, observables };
+ }
+
+ // Pop 1 event from front, wait for up to timeoutUs if empty.
+ const Event& pop(int64_t timeoutUs = 0) {
+ std::unique_lock lock(mLock);
+
+ if (mEventQueue.empty() && timeoutUs > 0) {
+ mCondition.wait_for(lock, std::chrono::microseconds(timeoutUs));
+ }
+
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+
+ return mPoppedEvent;
+ }
+
+ // Push 1 event to back.
+ void append(const Event& event) {
+ ALOGD("%s", toString(event).c_str());
+
+ std::unique_lock lock(mLock);
+
+ mEventQueue.push_back(event);
+ mCondition.notify_one();
+ }
+
+private:
+ std::mutex mLock;
+ std::condition_variable mCondition;
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+};
+
+const EventTracker::Event EventTracker::NoEvent;
+
+// Operators for GTest macros.
+bool operator==(const EventTracker::Event& lhs, const EventTracker::Event& rhs) {
+ return lhs.type == rhs.type && lhs.uid == rhs.uid && lhs.pid == rhs.pid &&
+ lhs.observables == rhs.observables;
+}
+
+std::ostream& operator<<(std::ostream& str, const EventTracker::Event& v) {
+ str << EventTracker::toString(v);
+ return str;
+}
+
+struct TestObserver : public BnResourceObserver, public EventTracker {
+ TestObserver(const char *name) : mName(name) {}
+ ~TestObserver() = default;
+ Status onStatusChanged(MediaObservableEvent event, int32_t uid, int32_t pid,
+ const std::vector<MediaObservableParcel>& observables) override {
+ ALOGD("%s: %s", mName.c_str(), __FUNCTION__);
+ if (event == MediaObservableEvent::kBusy) {
+ append(Busy(uid, pid, observables));
+ } else {
+ append(Idle(uid, pid, observables));
+ }
+
+ return Status::ok();
+ }
+ std::string mName;
+};
+
+class ResourceObserverServiceTest : public ResourceManagerServiceTestBase {
+public:
+ ResourceObserverServiceTest() : ResourceManagerServiceTestBase(),
+ mObserverService(::ndk::SharedRefBase::make<ResourceObserverService>()),
+ mTestObserver1(::ndk::SharedRefBase::make<TestObserver>("observer1")),
+ mTestObserver2(::ndk::SharedRefBase::make<TestObserver>("observer2")),
+ mTestObserver3(::ndk::SharedRefBase::make<TestObserver>("observer3")) {
+ mService->setObserverService(mObserverService);
+ }
+
+ void registerObservers(MediaObservableEvent filter = ALL) {
+ std::vector<MediaObservableFilter> filters1, filters2, filters3;
+ filters1 = {{MediaObservableType::kVideoSecureCodec, filter}};
+ filters2 = {{MediaObservableType::kVideoNonSecureCodec, filter}};
+ filters3 = {{MediaObservableType::kVideoSecureCodec, filter},
+ {MediaObservableType::kVideoNonSecureCodec, filter}};
+
+ // mTestObserver1 monitors secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // mTestObserver2 monitors non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver2, filters2).isOk());
+
+ // mTestObserver3 monitors both secure & non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver3, filters3).isOk());
+ }
+
+protected:
+ std::shared_ptr<ResourceObserverService> mObserverService;
+ std::shared_ptr<TestObserver> mTestObserver1;
+ std::shared_ptr<TestObserver> mTestObserver2;
+ std::shared_ptr<TestObserver> mTestObserver3;
+};
+
+TEST_F(ResourceObserverServiceTest, testRegisterObserver) {
+ std::vector<MediaObservableFilter> filters1;
+ Status status;
+
+ // Registering with an empty filter list should fail.
+ status = mObserverService->registerObserver(mTestObserver1, filters1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), BAD_VALUE);
+
+ // mTestObserver1 monitors secure video codecs.
+ filters1 = {{MediaObservableType::kVideoSecureCodec, ALL}};
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // Registering a duplicate observer should fail.
+ status = mObserverService->registerObserver(mTestObserver1, filters1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), ALREADY_EXISTS);
+}
+
+TEST_F(ResourceObserverServiceTest, testUnregisterObserver) {
+ std::vector<MediaObservableFilter> filters1;
+ Status status;
+
+ // Unregistering without registering first should fail.
+ status = mObserverService->unregisterObserver(mTestObserver1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), NAME_NOT_FOUND);
+
+ // mTestObserver1 monitors secure video codecs.
+ filters1 = {{MediaObservableType::kVideoSecureCodec, ALL}};
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+ EXPECT_TRUE(mObserverService->unregisterObserver(mTestObserver1).isOk());
+
+ // Unregistering again should fail.
+ status = mObserverService->unregisterObserver(mTestObserver1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), NAME_NOT_FOUND);
+}
+
+TEST_F(ResourceObserverServiceTest, testAddResourceBasic) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+ // Add secure video codec.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+
+ // Add non-secure video codec.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+
+ // Add secure & non-secure video codecs.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+
+ // Add additional audio codecs, should be ignored.
+ resources.push_back(MediaResource::CodecResource(1 /*secure*/, 0 /*video*/));
+ resources.push_back(MediaResource::CodecResource(0 /*secure*/, 0 /*video*/));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables3));
+}
+
+TEST_F(ResourceObserverServiceTest, testAddResourceMultiple) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add multiple secure & non-secure video codecs.
+ // Multiple entries of the same type should be merged, and the count should be propagated correctly.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 3 /*count*/)};
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 2}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 3}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 2},
+ {MediaObservableType::kVideoNonSecureCodec, 3}};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+}
+
+TEST_F(ResourceObserverServiceTest, testRemoveResourceBasic) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+ // Add secure video codec to client1.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ // Remove the secure video codec. Observers 1 & 3 should receive updates.
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
+ // Remove secure video codec again, should have no event.
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove client1, should have no event.
+ mService->removeClient(kTestPid1, getId(mTestClient1));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+
+ // Add non-secure video codec to client2.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ // Remove client2. Observers 2 & 3 should receive updates.
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ // Remove non-secure codec after client2 removed, should have no event.
+ mService->removeResource(kTestPid2, getId(mTestClient2), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove client2 again, should have no event.
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+
+ // Add secure & non-secure video codecs, plus audio codecs (which are ignored).
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 0 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+ // Remove one audio codec, should have no event.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 0 /*video*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove the other audio codec and the secure video codec; only the secure video
+ // codec removal should be reported.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ // Remove client3 entirely. Non-secure video codec removal should be reported.
+ mService->removeClient(kTestPid2, getId(mTestClient3));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+}
+
+TEST_F(ResourceObserverServiceTest, testRemoveResourceMultiple) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add multiple secure & non-secure video codecs, plus audio codecs (which are ignored).
+ // (ResourceManager will merge these internally.)
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 4 /*count*/),
+ MediaResource::CodecResource(1 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 0 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 4}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 4}};
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+ // Remove one audio codec, 2 secure video codecs and 2 non-secure video codecs.
+ // 1 secure video codec removal and 2 non-secure video codec removals should be reported.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 2 /*count*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 2}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 2}};
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables3));
+ // Remove client3 entirely. The removal of the 2 non-secure video codecs should be reported.
+ mService->removeClient(kTestPid2, getId(mTestClient3));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+}
+
+TEST_F(ResourceObserverServiceTest, testEventFilters) {
+ // Register observers with different event filters.
+ std::vector<MediaObservableFilter> filters1, filters2, filters3;
+ filters1 = {{MediaObservableType::kVideoSecureCodec, BUSY}};
+ filters2 = {{MediaObservableType::kVideoNonSecureCodec, IDLE}};
+ filters3 = {{MediaObservableType::kVideoSecureCodec, IDLE},
+ {MediaObservableType::kVideoNonSecureCodec, BUSY}};
+
+ // mTestObserver1 monitors secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // mTestObserver2 monitors non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver2, filters2).isOk());
+
+ // mTestObserver3 monitors both secure & non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver3, filters3).isOk());
+
+ std::vector<MediaObservableParcel> observables1, observables2;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add secure & non-secure video codecs.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+
+ // Remove secure & non-secure video codecs.
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh b/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..1c4ae98
--- /dev/null
+++ b/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ "$SYNC_FINISHED" != true ]; then
+ if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+ fi
+
+ # ensure we have mm
+ . $ANDROID_BUILD_TOP/build/envsetup.sh
+
+ mm
+
+ echo "waiting for device"
+
+ adb root && adb wait-for-device remount && adb sync
+fi
+
+echo "========================================"
+
+echo "testing ResourceManagerService"
+#adb shell /data/nativetest64/ResourceManagerService_test/ResourceManagerService_test
+adb shell /data/nativetest/ResourceManagerService_test/ResourceManagerService_test
+
+echo "testing ServiceLog"
+#adb shell /data/nativetest64/ServiceLog_test/ServiceLog_test
+adb shell /data/nativetest/ServiceLog_test/ServiceLog_test
+
+echo "testing ResourceObserverService"
+#adb shell /data/nativetest64/ResourceObserverService_test/ResourceObserverService_test
+adb shell /data/nativetest/ResourceObserverService_test/ResourceObserverService_test
diff --git a/services/mediatranscoding/MediaTranscodingService.cpp b/services/mediatranscoding/MediaTranscodingService.cpp
index ef7d6d2..eb3a873 100644
--- a/services/mediatranscoding/MediaTranscodingService.cpp
+++ b/services/mediatranscoding/MediaTranscodingService.cpp
@@ -25,8 +25,8 @@
#include <media/TranscoderWrapper.h>
#include <media/TranscodingClientManager.h>
#include <media/TranscodingJobScheduler.h>
+#include <media/TranscodingResourcePolicy.h>
#include <media/TranscodingUidPolicy.h>
-#include <private/android_filesystem_config.h>
#include <utils/Log.h>
#include <utils/Vector.h>
@@ -40,28 +40,16 @@
errorCode, \
String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, ##__VA_ARGS__))
-// Can MediaTranscoding service trust the caller based on the calling UID?
-// TODO(hkuang): Add MediaProvider's UID.
-static bool isTrustedCallingUid(uid_t uid) {
- switch (uid) {
- case AID_ROOT: // root user
- case AID_SYSTEM:
- case AID_SHELL:
- case AID_MEDIA: // mediaserver
- return true;
- default:
- return false;
- }
-}
-
MediaTranscodingService::MediaTranscodingService(
const std::shared_ptr<TranscoderInterface>& transcoder)
: mUidPolicy(new TranscodingUidPolicy()),
- mJobScheduler(new TranscodingJobScheduler(transcoder, mUidPolicy)),
+ mResourcePolicy(new TranscodingResourcePolicy()),
+ mJobScheduler(new TranscodingJobScheduler(transcoder, mUidPolicy, mResourcePolicy)),
mClientManager(new TranscodingClientManager(mJobScheduler)) {
ALOGV("MediaTranscodingService is created");
transcoder->setCallback(mJobScheduler);
mUidPolicy->setCallback(mJobScheduler);
+ mResourcePolicy->setCallback(mJobScheduler);
}
MediaTranscodingService::~MediaTranscodingService() {
@@ -113,51 +101,18 @@
Status MediaTranscodingService::registerClient(
const std::shared_ptr<ITranscodingClientCallback>& in_callback,
- const std::string& in_clientName, const std::string& in_opPackageName, int32_t in_clientUid,
- int32_t in_clientPid, std::shared_ptr<ITranscodingClient>* _aidl_return) {
+ const std::string& in_clientName, const std::string& in_opPackageName,
+ std::shared_ptr<ITranscodingClient>* _aidl_return) {
if (in_callback == nullptr) {
*_aidl_return = nullptr;
return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Client callback cannot be null!");
}
- int32_t callingPid = AIBinder_getCallingPid();
- int32_t callingUid = AIBinder_getCallingUid();
-
- // Check if we can trust clientUid. Only privilege caller could forward the
- // uid on app client's behalf.
- if (in_clientUid == USE_CALLING_UID) {
- in_clientUid = callingUid;
- } else if (!isTrustedCallingUid(callingUid)) {
- ALOGE("MediaTranscodingService::registerClient failed (calling PID %d, calling UID %d) "
- "rejected "
- "(don't trust clientUid %d)",
- in_clientPid, in_clientUid, in_clientUid);
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "Untrusted caller (calling PID %d, UID %d) trying to "
- "register client",
- in_clientPid, in_clientUid);
- }
-
- // Check if we can trust clientPid. Only privilege caller could forward the
- // pid on app client's behalf.
- if (in_clientPid == USE_CALLING_PID) {
- in_clientPid = callingPid;
- } else if (!isTrustedCallingUid(callingUid)) {
- ALOGE("MediaTranscodingService::registerClient client failed (calling PID %d, calling UID "
- "%d) rejected "
- "(don't trust clientPid %d)",
- in_clientPid, in_clientUid, in_clientPid);
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "Untrusted caller (calling PID %d, UID %d) trying to "
- "register client",
- in_clientPid, in_clientUid);
- }
-
// Creates the client and uses its process id as client id.
std::shared_ptr<ITranscodingClient> newClient;
- status_t err = mClientManager->addClient(in_callback, in_clientPid, in_clientUid, in_clientName,
- in_opPackageName, &newClient);
+ status_t err =
+ mClientManager->addClient(in_callback, in_clientName, in_opPackageName, &newClient);
if (err != OK) {
*_aidl_return = nullptr;
return STATUS_ERROR_FMT(err, "Failed to add client to TranscodingClientManager");
diff --git a/services/mediatranscoding/MediaTranscodingService.h b/services/mediatranscoding/MediaTranscodingService.h
index 505239c..0fe6864 100644
--- a/services/mediatranscoding/MediaTranscodingService.h
+++ b/services/mediatranscoding/MediaTranscodingService.h
@@ -32,6 +32,7 @@
class TranscodingJobScheduler;
class TranscoderInterface;
class UidPolicyInterface;
+class ResourcePolicyInterface;
class MediaTranscodingService : public BnMediaTranscodingService {
public:
@@ -47,7 +48,6 @@
Status registerClient(const std::shared_ptr<ITranscodingClientCallback>& in_callback,
const std::string& in_clientName, const std::string& in_opPackageName,
- int32_t in_clientUid, int32_t in_clientPid,
std::shared_ptr<ITranscodingClient>* _aidl_return) override;
Status getNumOfClients(int32_t* _aidl_return) override;
@@ -60,6 +60,7 @@
mutable std::mutex mServiceLock;
std::shared_ptr<UidPolicyInterface> mUidPolicy;
+ std::shared_ptr<ResourcePolicyInterface> mResourcePolicy;
std::shared_ptr<TranscodingJobScheduler> mJobScheduler;
std::shared_ptr<TranscodingClientManager> mClientManager;
};
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index 364a198..6497685 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -25,14 +25,6 @@
static_libs: [
"mediatranscoding_aidl_interface-ndk_platform",
],
-}
-
-// MediaTranscodingService unit test using simulated transcoder
-cc_test {
- name: "mediatranscodingservice_simulated_tests",
- defaults: ["mediatranscodingservice_test_defaults"],
-
- srcs: ["mediatranscodingservice_simulated_tests.cpp"],
required: [
"TranscodingUidPolicy_TestAppA",
@@ -41,6 +33,14 @@
],
}
+// MediaTranscodingService unit test using simulated transcoder
+cc_test {
+ name: "mediatranscodingservice_simulated_tests",
+ defaults: ["mediatranscodingservice_test_defaults"],
+
+ srcs: ["mediatranscodingservice_simulated_tests.cpp"],
+}
+
// MediaTranscodingService unit test using real transcoder
cc_test {
name: "mediatranscodingservice_real_tests",
@@ -48,3 +48,11 @@
srcs: ["mediatranscodingservice_real_tests.cpp"],
}
+
+// MediaTranscodingService unit test related to resource management
+cc_test {
+ name: "mediatranscodingservice_resource_tests",
+ defaults: ["mediatranscodingservice_test_defaults"],
+
+ srcs: ["mediatranscodingservice_resource_tests.cpp"],
+}
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 53fd7ec..0af572e 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -58,6 +58,9 @@
constexpr uid_t kClientUid = 5000;
#define UID(n) (kClientUid + (n))
+constexpr pid_t kClientPid = 10000;
+#define PID(n) (kClientPid + (n))
+
constexpr int32_t kClientId = 0;
#define CLIENT(n) (kClientId + (n))
@@ -168,6 +171,32 @@
return mPoppedEvent;
}
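+ // Wait up to timeoutUs for the target event to show up in the queue. On success, move all
+ // events up to and including the target into outEvents and return true; otherwise return false.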
+ bool waitForSpecificEventAndPop(const Event& target, std::list<Event>* outEvents,
+ int64_t timeoutUs = 0) {
+ std::unique_lock lock(mLock);
+
+ auto startTime = std::chrono::system_clock::now();
+
+ std::list<Event>::iterator it;
+ while (((it = std::find(mEventQueue.begin(), mEventQueue.end(), target)) ==
+ mEventQueue.end()) &&
+ timeoutUs > 0) {
+ std::cv_status status = mCondition.wait_for(lock, std::chrono::microseconds(timeoutUs));
+ if (status == std::cv_status::timeout) {
+ break;
+ }
+ std::chrono::microseconds elapsedTime = std::chrono::system_clock::now() - startTime;
+ timeoutUs -= elapsedTime.count();
+ }
+
+ if (it == mEventQueue.end()) {
+ return false;
+ }
+ *outEvents = std::list<Event>(mEventQueue.begin(), std::next(it));
+ mEventQueue.erase(mEventQueue.begin(), std::next(it));
+ return true;
+ }
+
// Push 1 event to back.
void append(const Event& event,
const TranscodingErrorCode err = TranscodingErrorCode::kNoError) {
@@ -186,7 +215,7 @@
mUpdateCount++;
}
- int getUpdateCount(int *lastProgress) {
+ int getUpdateCount(int* lastProgress) {
std::unique_lock lock(mLock);
*lastProgress = mLastProgress;
return mUpdateCount;
@@ -217,9 +246,21 @@
return str;
}
-struct TestClientCallback : public BnTranscodingClientCallback, public EventTracker {
- TestClientCallback(int32_t id) : mClientId(id) {
- ALOGI("TestClientCallback %d Created", mClientId);
+static constexpr bool success = true;
+static constexpr bool fail = false;
+
+struct TestClientCallback : public BnTranscodingClientCallback,
+ public EventTracker,
+ public std::enable_shared_from_this<TestClientCallback> {
+ TestClientCallback(const char* packageName, int32_t id)
+ : mClientId(id), mClientPid(PID(id)), mClientUid(UID(id)), mPackageName(packageName) {
+ ALOGI("TestClientCallback %d created: pid %d, uid %d", id, PID(id), UID(id));
+
+ // Use package uid if that's available.
+ uid_t packageUid;
+ if (getUidForPackage(String16(packageName), 0 /*userId*/, packageUid) == NO_ERROR) {
+ mClientUid = packageUid;
+ }
}
virtual ~TestClientCallback() { ALOGI("TestClientCallback %d destroyed", mClientId); }
@@ -285,7 +326,99 @@
return Status::ok();
}
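+ // Register this callback with the service. If the package uid can be resolved, it overrides
+ // the default test uid so the service sees the app's real uid.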
+ Status registerClient(const char* packageName,
+ const std::shared_ptr<IMediaTranscodingService>& service) {
+ // Override the default uid if the package uid is found.
+ uid_t uid;
+ if (getUidForPackage(String16(packageName), 0 /*userId*/, uid) == NO_ERROR) {
+ mClientUid = uid;
+ }
+
+ ALOGD("registering %s with uid %d", packageName, mClientUid);
+
+ std::shared_ptr<ITranscodingClient> client;
+ Status status =
+ service->registerClient(shared_from_this(), kClientName, packageName, &client);
+
+ mClient = status.isOk() ? client : nullptr;
+ return status;
+ }
+
+ Status unregisterClient() {
+ Status status;
+ if (mClient != nullptr) {
+ status = mClient->unregister();
+ mClient = nullptr;
+ }
+ return status;
+ }
+
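+ // Submit a transcoding request for jobId and verify the result matches the expected outcome
+ // (success by default, or failure when instantiated as submit<fail>).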
+ template <bool expectation = success>
+ bool submit(int32_t jobId, const char* sourceFilePath, const char* destinationFilePath,
+ TranscodingJobPriority priority = TranscodingJobPriority::kNormal,
+ int bitrateBps = -1, int overridePid = -1, int overrideUid = -1) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ TranscodingRequestParcel request;
+ TranscodingJobParcel job;
+
+ request.sourceFilePath = sourceFilePath;
+ request.destinationFilePath = destinationFilePath;
+ request.priority = priority;
+ request.clientPid = (overridePid == -1) ? mClientPid : overridePid;
+ request.clientUid = (overrideUid == -1) ? mClientUid : overrideUid;
+ if (bitrateBps > 0) {
+ request.requestedVideoTrackFormat.emplace(TranscodingVideoTrackFormat());
+ request.requestedVideoTrackFormat->bitrateBps = bitrateBps;
+ }
+ Status status = mClient->submitRequest(request, &job, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+ if (shouldSucceed) {
+ EXPECT_EQ(job.jobId, jobId);
+ }
+
+ return status.isOk() && (result == shouldSucceed) && (!shouldSucceed || job.jobId == jobId);
+ }
+
+ template <bool expectation = success>
+ bool cancel(int32_t jobId) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ Status status = mClient->cancelJob(jobId, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+
+ return status.isOk() && (result == shouldSucceed);
+ }
+
+ template <bool expectation = success>
+ bool getJob(int32_t jobId, const char* sourceFilePath, const char* destinationFilePath) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ TranscodingJobParcel job;
+ Status status = mClient->getJobWithId(jobId, &job, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+ if (shouldSucceed) {
+ EXPECT_EQ(job.jobId, jobId);
+ EXPECT_EQ(job.request.sourceFilePath, sourceFilePath);
+ }
+
+ return status.isOk() && (result == shouldSucceed) &&
+ (!shouldSucceed ||
+ (job.jobId == jobId && job.request.sourceFilePath == sourceFilePath &&
+ job.request.destinationFilePath == destinationFilePath));
+ }
+
int32_t mClientId;
+ pid_t mClientPid;
+ uid_t mClientUid;
+ std::string mPackageName;
+ std::shared_ptr<ITranscodingClient> mClient;
};
class MediaTranscodingServiceTestBase : public ::testing::Test {
@@ -306,37 +439,31 @@
ALOGE("Failed to connect to the media.trascoding service.");
return;
}
- mClientCallback1 = ::ndk::SharedRefBase::make<TestClientCallback>(CLIENT(1));
- mClientCallback2 = ::ndk::SharedRefBase::make<TestClientCallback>(CLIENT(2));
- mClientCallback3 = ::ndk::SharedRefBase::make<TestClientCallback>(CLIENT(3));
+ mClient1 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageA, 1);
+ mClient2 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageB, 2);
+ mClient3 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageC, 3);
}
- std::shared_ptr<ITranscodingClient> registerOneClient(
- const char* packageName, const std::shared_ptr<TestClientCallback>& callback,
- uid_t defaultUid) {
- uid_t uid;
- if (getUidForPackage(String16(packageName), 0 /*userId*/, uid) != NO_ERROR) {
- uid = defaultUid;
- }
-
- ALOGD("registering %s with uid %d", packageName, uid);
+ Status registerOneClient(const std::shared_ptr<TestClientCallback>& callback) {
+ ALOGD("registering %s with uid %d", callback->mPackageName.c_str(), callback->mClientUid);
std::shared_ptr<ITranscodingClient> client;
- Status status = mService->registerClient(callback, kClientName, packageName, uid,
- kClientUseCallingPid, &client);
- return status.isOk() ? client : nullptr;
+ Status status =
+ mService->registerClient(callback, kClientName, callback->mPackageName, &client);
+
+ if (status.isOk()) {
+ callback->mClient = client;
+ } else {
+ callback->mClient = nullptr;
+ }
+ return status;
}
void registerMultipleClients() {
// Register 3 clients.
- mClient1 = registerOneClient(kClientPackageA, mClientCallback1, UID(1));
- EXPECT_TRUE(mClient1 != nullptr);
-
- mClient2 = registerOneClient(kClientPackageB, mClientCallback2, UID(2));
- EXPECT_TRUE(mClient2 != nullptr);
-
- mClient3 = registerOneClient(kClientPackageC, mClientCallback3, UID(3));
- EXPECT_TRUE(mClient3 != nullptr);
+ EXPECT_TRUE(registerOneClient(mClient1).isOk());
+ EXPECT_TRUE(registerOneClient(mClient2).isOk());
+ EXPECT_TRUE(registerOneClient(mClient3).isOk());
// Check the number of clients.
int32_t numOfClients;
@@ -346,99 +473,24 @@
}
void unregisterMultipleClients() {
- Status status;
-
// Unregister the clients.
- status = mClient1->unregister();
- EXPECT_TRUE(status.isOk());
-
- status = mClient2->unregister();
- EXPECT_TRUE(status.isOk());
-
- status = mClient3->unregister();
- EXPECT_TRUE(status.isOk());
+ EXPECT_TRUE(mClient1->unregisterClient().isOk());
+ EXPECT_TRUE(mClient2->unregisterClient().isOk());
+ EXPECT_TRUE(mClient3->unregisterClient().isOk());
// Check the number of clients.
int32_t numOfClients;
- status = mService->getNumOfClients(&numOfClients);
+ Status status = mService->getNumOfClients(&numOfClients);
EXPECT_TRUE(status.isOk());
EXPECT_EQ(0, numOfClients);
}
- static constexpr bool success = true;
- static constexpr bool fail = false;
-
- template <bool expectation = success>
- bool submit(const std::shared_ptr<ITranscodingClient>& client, int32_t jobId,
- const char* sourceFilePath, const char* destinationFilePath,
- TranscodingJobPriority priority = TranscodingJobPriority::kNormal,
- int bitrateBps = -1) {
- constexpr bool shouldSucceed = (expectation == success);
- bool result;
- TranscodingRequestParcel request;
- TranscodingJobParcel job;
-
- request.sourceFilePath = sourceFilePath;
- request.destinationFilePath = destinationFilePath;
- request.priority = priority;
- if (bitrateBps > 0) {
- request.requestedVideoTrackFormat.emplace(TranscodingVideoTrackFormat());
- request.requestedVideoTrackFormat->bitrateBps = bitrateBps;
- }
- Status status = client->submitRequest(request, &job, &result);
-
- EXPECT_TRUE(status.isOk());
- EXPECT_EQ(result, shouldSucceed);
- if (shouldSucceed) {
- EXPECT_EQ(job.jobId, jobId);
- }
-
- return status.isOk() && (result == shouldSucceed) && (!shouldSucceed || job.jobId == jobId);
- }
-
- template <bool expectation = success>
- bool cancel(const std::shared_ptr<ITranscodingClient>& client, int32_t jobId) {
- constexpr bool shouldSucceed = (expectation == success);
- bool result;
- Status status = client->cancelJob(jobId, &result);
-
- EXPECT_TRUE(status.isOk());
- EXPECT_EQ(result, shouldSucceed);
-
- return status.isOk() && (result == shouldSucceed);
- }
-
- template <bool expectation = success>
- bool getJob(const std::shared_ptr<ITranscodingClient>& client, int32_t jobId,
- const char* sourceFilePath, const char* destinationFilePath) {
- constexpr bool shouldSucceed = (expectation == success);
- bool result;
- TranscodingJobParcel job;
- Status status = client->getJobWithId(jobId, &job, &result);
-
- EXPECT_TRUE(status.isOk());
- EXPECT_EQ(result, shouldSucceed);
- if (shouldSucceed) {
- EXPECT_EQ(job.jobId, jobId);
- EXPECT_EQ(job.request.sourceFilePath, sourceFilePath);
- }
-
- return status.isOk() && (result == shouldSucceed) &&
- (!shouldSucceed ||
- (job.jobId == jobId && job.request.sourceFilePath == sourceFilePath &&
- job.request.destinationFilePath == destinationFilePath));
- }
-
void deleteFile(const char* path) { unlink(path); }
std::shared_ptr<IMediaTranscodingService> mService;
- std::shared_ptr<TestClientCallback> mClientCallback1;
- std::shared_ptr<TestClientCallback> mClientCallback2;
- std::shared_ptr<TestClientCallback> mClientCallback3;
- std::shared_ptr<ITranscodingClient> mClient1;
- std::shared_ptr<ITranscodingClient> mClient2;
- std::shared_ptr<ITranscodingClient> mClient3;
- const char* mTestName;
+ std::shared_ptr<TestClientCallback> mClient1;
+ std::shared_ptr<TestClientCallback> mClient2;
+ std::shared_ptr<TestClientCallback> mClient3;
};
} // namespace media
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml
index 91c14a5..0dff171 100644
--- a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml
@@ -28,6 +28,14 @@
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
+ <activity android:name="com.android.tests.transcoding.ResourcePolicyTestActivity"
+ android:exported="true">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT"/>
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
</application>
</manifest>
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java
index 7295073..b79164d 100644
--- a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java
@@ -46,7 +46,7 @@
// Called before subsequent visible lifetimes
// for an activity process.
@Override
- public void onRestart(){
+ public void onRestart() {
super.onRestart();
// Load changes knowing that the Activity has already
// been visible within this process.
@@ -54,14 +54,14 @@
// Called at the start of the visible lifetime.
@Override
- public void onStart(){
+ public void onStart() {
super.onStart();
// Apply any required UI change now that the Activity is visible.
}
// Called at the start of the active lifetime.
@Override
- public void onResume(){
+ public void onResume() {
super.onResume();
// Resume any paused UI updates, threads, or processes required
// by the Activity but suspended when it was inactive.
@@ -80,7 +80,7 @@
// Called at the end of the active lifetime.
@Override
- public void onPause(){
+ public void onPause() {
// Suspend UI updates, threads, or CPU intensive processes
// that don't need to be updated when the Activity isn't
// the active foreground Activity.
@@ -89,7 +89,7 @@
// Called at the end of the visible lifetime.
@Override
- public void onStop(){
+ public void onStop() {
// Suspend remaining UI updates, threads, or processing
// that aren't required when the Activity isn't visible.
// Persist all edits or state changes
@@ -99,10 +99,9 @@
// Sometimes called at the end of the full lifetime.
@Override
- public void onDestroy(){
+ public void onDestroy() {
// Clean up any resources including ending threads,
// closing database connections etc.
super.onDestroy();
}
-
}
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java
new file mode 100644
index 0000000..c9e2ddb
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.tests.transcoding;
+
+import android.app.Activity;
+import android.media.MediaCodec;
+import android.media.MediaCodecInfo;
+import android.media.MediaCodecInfo.CodecCapabilities;
+import android.media.MediaCodecInfo.VideoCapabilities;
+import android.media.MediaCodecList;
+import android.media.MediaFormat;
+import android.os.Bundle;
+import android.util.Log;
+import java.io.IOException;
+import java.util.Vector;
+
+public class ResourcePolicyTestActivity extends Activity {
+ public static final int TYPE_NONSECURE = 0;
+ public static final int TYPE_SECURE = 1;
+ public static final int TYPE_MIX = 2;
+
+ protected String TAG;
+ private static final int FRAME_RATE = 10;
+ private static final int IFRAME_INTERVAL = 10; // 10 seconds between I-frames
+ private static final String MIME = MediaFormat.MIMETYPE_VIDEO_AVC;
+ private static final int TIMEOUT_MS = 5000;
+
+ private Vector<MediaCodec> mCodecs = new Vector<MediaCodec>();
+
+ private class TestCodecCallback extends MediaCodec.Callback {
+ @Override
+ public void onInputBufferAvailable(MediaCodec codec, int index) {
+ Log.d(TAG, "onInputBufferAvailable " + codec.toString());
+ }
+
+ @Override
+ public void onOutputBufferAvailable(
+ MediaCodec codec, int index, MediaCodec.BufferInfo info) {
+ Log.d(TAG, "onOutputBufferAvailable " + codec.toString());
+ }
+
+ @Override
+ public void onError(MediaCodec codec, MediaCodec.CodecException e) {
+ Log.d(TAG, "onError " + codec.toString() + " errorCode " + e.getErrorCode());
+ }
+
+ @Override
+ public void onOutputFormatChanged(MediaCodec codec, MediaFormat format) {
+ Log.d(TAG, "onOutputFormatChanged " + codec.toString());
+ }
+ }
+
+ private MediaCodec.Callback mCallback = new TestCodecCallback();
+
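+ // Build a video format at the lowest width, height and bitrate the codec supports,
+ // optionally requiring the secure playback feature.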
+ private MediaFormat getTestFormat(CodecCapabilities caps, boolean securePlayback) {
+ VideoCapabilities vcaps = caps.getVideoCapabilities();
+ int width = vcaps.getSupportedWidths().getLower();
+ int height = vcaps.getSupportedHeightsFor(width).getLower();
+ int bitrate = vcaps.getBitrateRange().getLower();
+
+ MediaFormat format = MediaFormat.createVideoFormat(MIME, width, height);
+ format.setInteger(MediaFormat.KEY_COLOR_FORMAT, caps.colorFormats[0]);
+ format.setInteger(MediaFormat.KEY_BIT_RATE, bitrate);
+ format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
+ format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
+ format.setFeatureEnabled(CodecCapabilities.FEATURE_SecurePlayback, securePlayback);
+ return format;
+ }
+
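+ // Pick an AVC decoder: one that supports secure playback when requested, or one that does
+ // not require it otherwise. Returns null if no suitable decoder is found.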
+ private MediaCodecInfo getTestCodecInfo(boolean securePlayback) {
+ // Use avc decoder for testing.
+ boolean isEncoder = false;
+
+ MediaCodecList mcl = new MediaCodecList(MediaCodecList.ALL_CODECS);
+ for (MediaCodecInfo info : mcl.getCodecInfos()) {
+ if (info.isEncoder() != isEncoder) {
+ continue;
+ }
+ CodecCapabilities caps;
+ try {
+ caps = info.getCapabilitiesForType(MIME);
+ boolean securePlaybackSupported =
+ caps.isFeatureSupported(CodecCapabilities.FEATURE_SecurePlayback);
+ boolean securePlaybackRequired =
+ caps.isFeatureRequired(CodecCapabilities.FEATURE_SecurePlayback);
+ if ((securePlayback && securePlaybackSupported)
+ || (!securePlayback && !securePlaybackRequired)) {
+ Log.d(TAG, "securePlayback " + securePlayback + " will use " + info.getName());
+ } else {
+ Log.d(TAG, "securePlayback " + securePlayback + " skip " + info.getName());
+ continue;
+ }
+ } catch (IllegalArgumentException e) {
+ // mime is not supported
+ continue;
+ }
+ return info;
+ }
+
+ return null;
+ }
+
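+ // Allocate codec instances for the type(s) requested via the launch intent, up to 'max'
+ // total, and return how many were actually created. Finishes early if no suitable codec exists.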
+ protected int allocateCodecs(int max) {
+ Bundle extras = getIntent().getExtras();
+ int type = TYPE_NONSECURE;
+ if (extras != null) {
+ type = extras.getInt("test-type", type);
+ Log.d(TAG, "type is: " + type);
+ }
+
+ boolean shouldSkip = false;
+ boolean securePlayback;
+ if (type == TYPE_NONSECURE || type == TYPE_MIX) {
+ securePlayback = false;
+ MediaCodecInfo info = getTestCodecInfo(securePlayback);
+ if (info != null) {
+ allocateCodecs(max, info, securePlayback);
+ } else {
+ shouldSkip = true;
+ }
+ }
+
+ if (!shouldSkip) {
+ if (type == TYPE_SECURE || type == TYPE_MIX) {
+ securePlayback = true;
+ MediaCodecInfo info = getTestCodecInfo(securePlayback);
+ if (info != null) {
+ allocateCodecs(max, info, securePlayback);
+ } else {
+ shouldSkip = true;
+ }
+ }
+ }
+
+ if (shouldSkip) {
+ Log.d(TAG, "test skipped as there's no supported codec.");
+ finishWithResult(RESULT_OK);
+ }
+
+ Log.d(TAG, "allocateCodecs returned " + mCodecs.size());
+ return mCodecs.size();
+ }
+
+ protected void allocateCodecs(int max, MediaCodecInfo info, boolean securePlayback) {
+ String name = info.getName();
+ CodecCapabilities caps = info.getCapabilitiesForType(MIME);
+ MediaFormat format = getTestFormat(caps, securePlayback);
+ MediaCodec codec = null;
+ for (int i = mCodecs.size(); i < max; ++i) {
+ try {
+ Log.d(TAG, "Create codec " + name + " #" + i);
+ codec = MediaCodec.createByCodecName(name);
+ codec.setCallback(mCallback);
+ Log.d(TAG, "Configure codec " + format);
+ codec.configure(format, null, null, 0);
+ Log.d(TAG, "Start codec " + format);
+ codec.start();
+ mCodecs.add(codec);
+ codec = null;
+ } catch (IllegalArgumentException e) {
+ Log.d(TAG, "IllegalArgumentException " + e.getMessage());
+ break;
+ } catch (IOException e) {
+ Log.d(TAG, "IOException " + e.getMessage());
+ break;
+ } catch (MediaCodec.CodecException e) {
+ Log.d(TAG, "CodecException 0x" + Integer.toHexString(e.getErrorCode()));
+ break;
+ } finally {
+ if (codec != null) {
+ Log.d(TAG, "release codec");
+ codec.release();
+ codec = null;
+ }
+ }
+ }
+ }
+
+ protected void finishWithResult(int result) {
+ for (int i = 0; i < mCodecs.size(); ++i) {
+ Log.d(TAG, "release codec #" + i);
+ mCodecs.get(i).release();
+ }
+ mCodecs.clear();
+ setResult(result);
+ finish();
+ Log.d(TAG, "activity finished");
+ }
+
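+ // Touch each codec once; a codec reclaimed by ResourceManager throws CodecException with
+ // ERROR_RECLAIMED, in which case it is released, removed from the list, and polling stops.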
+ private void doUseCodecs() {
+ int current = 0;
+ try {
+ for (current = 0; current < mCodecs.size(); ++current) {
+ mCodecs.get(current).getName();
+ }
+ } catch (MediaCodec.CodecException e) {
+ Log.d(TAG, "useCodecs got CodecException 0x" + Integer.toHexString(e.getErrorCode()));
+ if (e.getErrorCode() == MediaCodec.CodecException.ERROR_RECLAIMED) {
+ Log.d(TAG, "Remove codec " + current + " from the list");
+ mCodecs.get(current).release();
+ mCodecs.remove(current);
+ mGotReclaimedException = true;
+ mUseCodecs = false;
+ }
+ return;
+ }
+ }
+
+ private Thread mWorkerThread;
+ private volatile boolean mUseCodecs = true;
+ private volatile boolean mGotReclaimedException = false;
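+ // Poll the codecs from a worker thread until one is reclaimed or TIMEOUT_MS elapses, then
+ // finish the activity with RESULT_OK.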
+ protected void useCodecs() {
+ mWorkerThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ long start = System.currentTimeMillis();
+ long timeSinceStartedMs = 0;
+ while (mUseCodecs && (timeSinceStartedMs < TIMEOUT_MS)) {
+ doUseCodecs();
+ try {
+ Thread.sleep(50 /* millis */);
+ } catch (InterruptedException e) {
+ }
+ timeSinceStartedMs = System.currentTimeMillis() - start;
+ }
+ if (mGotReclaimedException) {
+ Log.d(TAG, "Got expected reclaim exception.");
+ }
+ finishWithResult(RESULT_OK);
+ }
+ });
+ mWorkerThread.start();
+ }
+
+ private static final int MAX_INSTANCES = 32;
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ TAG = "ResourcePolicyTestActivity";
+
+ Log.d(TAG, "onCreate called.");
+ super.onCreate(savedInstanceState);
+
+ if (allocateCodecs(MAX_INSTANCES) == MAX_INSTANCES) {
+ // haven't reached the limit with MAX_INSTANCES, no need to wait for reclaim exception.
+ //mWaitForReclaim = false;
+ Log.d(TAG, "Didn't hit resource limitation");
+ }
+
+ useCodecs();
+ }
+
+ @Override
+ protected void onDestroy() {
+ Log.d(TAG, "onDestroy called.");
+ super.onDestroy();
+ }
+}
diff --git a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
index d66b340..1b42a22 100755
--- a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
+++ b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
@@ -37,6 +37,11 @@
#adb shell /data/nativetest64/mediatranscodingservice_real_tests/mediatranscodingservice_real_tests
adb shell /data/nativetest/mediatranscodingservice_real_tests/mediatranscodingservice_real_tests
+echo "[==========] running resource tests"
+adb shell kill -9 `pid media.transcoding`
+#adb shell /data/nativetest64/mediatranscodingservice_resource_tests/mediatranscodingservice_resource_tests
+adb shell /data/nativetest/mediatranscodingservice_resource_tests/mediatranscodingservice_resource_tests
+
echo "[==========] removing debug properties"
adb shell setprop debug.transcoding.simulated_transcoder \"\"
adb shell kill -9 `pid media.transcoding`
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
index 1def4b9..381bbf5 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
@@ -45,9 +45,11 @@
class MediaTranscodingServiceRealTest : public MediaTranscodingServiceTestBase {
public:
- MediaTranscodingServiceRealTest() {}
+ MediaTranscodingServiceRealTest() { ALOGI("MediaTranscodingServiceRealTest created"); }
- void deleteFile(const char* path) { unlink(path); }
+ virtual ~MediaTranscodingServiceRealTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
};
TEST_F(MediaTranscodingServiceRealTest, TestInvalidSource) {
@@ -58,11 +60,11 @@
deleteFile(dstPath);
// Submit one job.
- EXPECT_TRUE(submit(mClient1, 0, srcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_TRUE(mClient1->submit(0, srcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
// Check expected error.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->getLastError(), TranscodingErrorCode::kErrorIO);
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kErrorIO);
unregisterMultipleClients();
}
@@ -74,11 +76,11 @@
deleteFile(dstPath);
// Submit one job.
- EXPECT_TRUE(submit(mClient1, 0, kShortSrcPath, dstPath));
+ EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath));
// Wait for job to finish.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
unregisterMultipleClients();
}
@@ -91,11 +93,11 @@
// Submit one job.
EXPECT_TRUE(
- submit(mClient1, 0, kShortSrcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
+ mClient1->submit(0, kShortSrcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
// Wait for job to finish.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
unregisterMultipleClients();
}
@@ -108,16 +110,16 @@
// Submit one job.
EXPECT_TRUE(
- submit(mClient1, 0, kLongSrcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
+ mClient1->submit(0, kLongSrcPath, dstPath, TranscodingJobPriority::kNormal, kBitRate));
// Wait for job to finish.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
// Check the progress update messages are received. For this clip (around ~15 second long),
// expect at least 10 updates, and the last update should be 100.
int lastProgress;
- EXPECT_GE(mClientCallback1->getUpdateCount(&lastProgress), 10);
+ EXPECT_GE(mClient1->getUpdateCount(&lastProgress), 10);
EXPECT_EQ(lastProgress, 100);
unregisterMultipleClients();
@@ -137,18 +139,18 @@
deleteFile(dstPath0);
deleteFile(dstPath1);
// Submit one job, should start immediately.
- EXPECT_TRUE(submit(mClient1, 0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_TRUE(getJob(mClient1, 0, srcPath0, dstPath0));
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getJob(0, srcPath0, dstPath0));
// Test cancel job immediately, getJob should fail after cancel.
- EXPECT_TRUE(cancel(mClient1, 0));
- EXPECT_TRUE(getJob<fail>(mClient1, 0, "", ""));
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->getJob<fail>(0, "", ""));
// Submit new job, new job should start immediately and finish.
- EXPECT_TRUE(submit(mClient1, 1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_TRUE(mClient1->submit(1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
unregisterMultipleClients();
}
@@ -167,20 +169,20 @@
deleteFile(dstPath0);
deleteFile(dstPath1);
// Submit two jobs, job 0 should start immediately, job 1 should be queued.
- EXPECT_TRUE(submit(mClient1, 0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
- EXPECT_TRUE(submit(mClient1, 1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_TRUE(getJob(mClient1, 0, srcPath0, dstPath0));
- EXPECT_TRUE(getJob(mClient1, 1, srcPath1, dstPath1));
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_TRUE(mClient1->submit(1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getJob(0, srcPath0, dstPath0));
+ EXPECT_TRUE(mClient1->getJob(1, srcPath1, dstPath1));
// Job 0 (longtest) shouldn't finish in 1 seconds.
- EXPECT_EQ(mClientCallback1->pop(1000000), EventTracker::NoEvent);
+ EXPECT_EQ(mClient1->pop(1000000), EventTracker::NoEvent);
// Now cancel job 0. Job 1 should start immediately and finish.
- EXPECT_TRUE(cancel(mClient1, 0));
- EXPECT_TRUE(getJob<fail>(mClient1, 0, "", ""));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->getJob<fail>(0, "", ""));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
unregisterMultipleClients();
}
@@ -196,35 +198,35 @@
deleteFile(dstPath1);
// Submit one offline job, should start immediately.
- EXPECT_TRUE(submit(mClient1, 0, srcPath0, dstPath0, TranscodingJobPriority::kUnspecified,
- kBitRate));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingJobPriority::kUnspecified,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
// Test get job after starts.
- EXPECT_TRUE(getJob(mClient1, 0, srcPath0, dstPath0));
+ EXPECT_TRUE(mClient1->getJob(0, srcPath0, dstPath0));
// Submit one realtime job.
- EXPECT_TRUE(submit(mClient1, 1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_TRUE(mClient1->submit(1, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
// Offline job should pause.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
- EXPECT_TRUE(getJob(mClient1, 0, srcPath0, dstPath0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getJob(0, srcPath0, dstPath0));
// Realtime job should start immediately, and run to finish.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
// Test get job after finish fails.
- EXPECT_TRUE(getJob<fail>(mClient1, 1, "", ""));
+ EXPECT_TRUE(mClient1->getJob<fail>(1, "", ""));
// Then offline job should resume.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
// Test get job after resume.
- EXPECT_TRUE(getJob(mClient1, 0, srcPath0, dstPath0));
+ EXPECT_TRUE(mClient1->getJob(0, srcPath0, dstPath0));
// Offline job should finish.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
// Test get job after finish fails.
- EXPECT_TRUE(getJob<fail>(mClient1, 0, "", ""));
+ EXPECT_TRUE(mClient1->getJob<fail>(0, "", ""));
unregisterMultipleClients();
}
@@ -256,31 +258,31 @@
// Submit job to Client1.
ALOGD("Submitting job to client1 (app A) ...");
- EXPECT_TRUE(submit(mClient1, 0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
// Client1's job should start immediately.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
ALOGD("Moving app B to top...");
EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
// Client1's job should continue to run, since Client2 (app B) doesn't have any job.
- EXPECT_EQ(mClientCallback1->pop(1000000), EventTracker::NoEvent);
+ EXPECT_EQ(mClient1->pop(1000000), EventTracker::NoEvent);
// Submit job to Client2.
ALOGD("Submitting job to client2 (app B) ...");
- EXPECT_TRUE(submit(mClient2, 0, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
+ EXPECT_TRUE(mClient2->submit(0, srcPath1, dstPath1, TranscodingJobPriority::kNormal, kBitRate));
// Client1's job should pause, client2's job should start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
// Client2's job should finish, then Client1's job should resume.
- EXPECT_EQ(mClientCallback2->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(2), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+ EXPECT_EQ(mClient2->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
// Client1's job should finish.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
unregisterMultipleClients();
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp
new file mode 100644
index 0000000..31697d5
--- /dev/null
+++ b/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscodingService.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscodingServiceResourceTest"
+
+#include "MediaTranscodingServiceTestHelper.h"
+
+/*
+ * Tests media transcoding service resource management with the real transcoder.
+ *
+ * Uses the same test assets as the MediaTranscoder unit tests. Before running the test,
+ * please make sure to push the test assets to /data/local/tmp:
+ *
+ * adb push $TOP/frameworks/av/media/libmediatranscoding/transcoder/tests/assets /data/local/tmp/TranscodingTestAssets
+ */
+namespace android {
+
+namespace media {
+
+constexpr int64_t kPaddingUs = 400000;
+constexpr int32_t kBitRate = 8 * 1000 * 1000; // 8 Mbps
+
+constexpr const char* kLongSrcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+
+constexpr const char* kResourcePolicyTestActivity =
+ "/com.android.tests.transcoding.ResourcePolicyTestActivity";
+
+#define OUTPATH(name) "/data/local/tmp/MediaTranscodingService_" #name ".MP4"
+
+class MediaTranscodingServiceResourceTest : public MediaTranscodingServiceTestBase {
+public:
+ MediaTranscodingServiceResourceTest() { ALOGI("MediaTranscodingServiceResourceTest created"); }
+
+ virtual ~MediaTranscodingServiceResourceTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
+};
+
+/**
+ * Basic testing for handling resource lost.
+ *
+ * This test starts a transcoding job (that's somewhat long and takes several seconds),
+ * then launches an activity that allocates video codec instances until it hits insufficient
+ * resource error. Because the activity is running in foreground,
+ * ResourceManager would reclaim codecs from transcoding service which should
+ * cause the job to be paused. The activity will hold the codecs for a few seconds
+ * before releasing them, and the transcoding service should be able to resume
+ * and complete the job.
+ */
+TEST_F(MediaTranscodingServiceResourceTest, TestResourceLost) {
+ ALOGD("TestResourceLost starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
+ deleteFile(dstPath0);
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit job to Client1.
+ ALOGD("Submitting job to client1 (app A) ...");
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingJobPriority::kNormal, kBitRate));
+
+ // Client1's job should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Launch ResourcePolicyTestActivity, which will try to allocate up to 32
+ // instances, which should trigger insufficient resources on most devices.
+ // (Note that it's possible that the device supports a very high number of
+ // resource instances, in which case we'll simply require that the job completes.)
+ ALOGD("Launch ResourcePolicyTestActivity...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kResourcePolicyTestActivity));
+
+ // The basic requirement is that the job should complete. Wait for finish
+ // event to come and pop up all events received.
+ std::list<EventTracker::Event> events;
+ EXPECT_TRUE(mClient1->waitForSpecificEventAndPop(EventTracker::Finished(CLIENT(1), 0), &events,
+ 15000000));
+
+ // If there is only 1 event, it must be the finish event (otherwise waitForSpecificEventAndPop
+ // wouldn't pop up anything), and we're ok.
+ //
+ // TODO: If there is only 1 event (finish), and no pause/resume happened, we need
+ // to verify that the ResourcePolicyTestActivity actually was able to allocate
+ // all 32 instances without hitting insufficient resources. Otherwise, it could
+ // be that ResourceManager was not able to reclaim codecs from the transcoding
+ // service at all, which means the resource management is broken.
+ if (events.size() > 1) {
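+ // If pause/resume did happen, expect the sequence to start with Pause, end with Resume
+ // followed by Finished, and contain only Pause/Resume events in between.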
+ EXPECT_TRUE(events.size() >= 3);
+ size_t i = 0;
+ for (auto& event : events) {
+ if (i == 0) {
+ EXPECT_EQ(event, EventTracker::Pause(CLIENT(1), 0));
+ } else if (i == events.size() - 2) {
+ EXPECT_EQ(event, EventTracker::Resume(CLIENT(1), 0));
+ } else if (i == events.size() - 1) {
+ EXPECT_EQ(event, EventTracker::Finished(CLIENT(1), 0));
+ } else {
+ EXPECT_TRUE(event == EventTracker::Pause(CLIENT(1), 0) ||
+ event == EventTracker::Resume(CLIENT(1), 0));
+ }
+ i++;
+ }
+ }
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+}
+
+} // namespace media
+} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
index 42b5877..c912b03 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
@@ -47,11 +47,10 @@
// Note that -1 is valid and means using calling pid/uid for the service. But only privilege caller could
// use them. This test is not a privilege caller.
constexpr int32_t kInvalidClientPid = -5;
+constexpr int32_t kInvalidClientUid = -10;
constexpr const char* kInvalidClientName = "";
constexpr const char* kInvalidClientOpPackageName = "";
-constexpr int32_t kClientUseCallingUid = IMediaTranscodingService::USE_CALLING_UID;
-
constexpr int64_t kPaddingUs = 1000000;
constexpr int64_t kJobWithPaddingUs = SimulatedTranscoder::kJobDurationUs + kPaddingUs;
@@ -59,24 +58,18 @@
class MediaTranscodingServiceSimulatedTest : public MediaTranscodingServiceTestBase {
public:
- MediaTranscodingServiceSimulatedTest() {}
+ MediaTranscodingServiceSimulatedTest() { ALOGI("MediaTranscodingServiceSimulatedTest created"); }
+
+ virtual ~MediaTranscodingServiceSimulatedTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
};
TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterNullClient) {
std::shared_ptr<ITranscodingClient> client;
// Register the client with null callback.
- Status status = mService->registerClient(nullptr, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client);
- EXPECT_FALSE(status.isOk());
-}
-
-TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterClientWithInvalidClientPid) {
- std::shared_ptr<ITranscodingClient> client;
-
- // Register the client with the service.
- Status status = mService->registerClient(mClientCallback1, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kInvalidClientPid, &client);
+ Status status = mService->registerClient(nullptr, kClientName, kClientOpPackageName, &client);
EXPECT_FALSE(status.isOk());
}
@@ -84,9 +77,8 @@
std::shared_ptr<ITranscodingClient> client;
// Register the client with the service.
- Status status = mService->registerClient(mClientCallback1, kInvalidClientName,
- kInvalidClientOpPackageName, kClientUseCallingUid,
- kClientUseCallingPid, &client);
+ Status status = mService->registerClient(mClient1, kInvalidClientName,
+ kInvalidClientOpPackageName, &client);
EXPECT_FALSE(status.isOk());
}
@@ -95,16 +87,14 @@
// Register the client with the service.
Status status =
- mService->registerClient(mClientCallback1, kClientName, kInvalidClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client);
+ mService->registerClient(mClient1, kClientName, kInvalidClientOpPackageName, &client);
EXPECT_FALSE(status.isOk());
}
TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterOneClient) {
std::shared_ptr<ITranscodingClient> client;
- Status status = mService->registerClient(mClientCallback1, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client);
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
EXPECT_TRUE(status.isOk());
// Validate the client.
@@ -129,8 +119,7 @@
TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterClientTwice) {
std::shared_ptr<ITranscodingClient> client;
- Status status = mService->registerClient(mClientCallback1, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client);
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
EXPECT_TRUE(status.isOk());
// Validate the client.
@@ -138,8 +127,7 @@
// Register the client again and expects failure.
std::shared_ptr<ITranscodingClient> client1;
- status = mService->registerClient(mClientCallback1, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client1);
+ status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client1);
EXPECT_FALSE(status.isOk());
// Unregister the client.
@@ -156,18 +144,18 @@
registerMultipleClients();
// Submit 2 requests on client1 first.
- EXPECT_TRUE(submit(mClient1, 0, "test_source_file", "test_destination_file"));
- EXPECT_TRUE(submit(mClient1, 1, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file", "test_destination_file"));
// Submit 2 requests on client2, jobId should be independent for each client.
- EXPECT_TRUE(submit(mClient2, 0, "test_source_file", "test_destination_file"));
- EXPECT_TRUE(submit(mClient2, 1, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient2->submit(0, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient2->submit(1, "test_source_file", "test_destination_file"));
// Cancel all jobs.
- EXPECT_TRUE(cancel(mClient1, 0));
- EXPECT_TRUE(cancel(mClient1, 1));
- EXPECT_TRUE(cancel(mClient2, 0));
- EXPECT_TRUE(cancel(mClient2, 1));
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->cancel(1));
+ EXPECT_TRUE(mClient2->cancel(0));
+ EXPECT_TRUE(mClient2->cancel(1));
unregisterMultipleClients();
}
@@ -176,32 +164,36 @@
registerMultipleClients();
// Test jobId assignment.
- EXPECT_TRUE(submit(mClient1, 0, "test_source_file_0", "test_destination_file"));
- EXPECT_TRUE(submit(mClient1, 1, "test_source_file_1", "test_destination_file"));
- EXPECT_TRUE(submit(mClient1, 2, "test_source_file_2", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file"));
// Test submit bad request (no valid sourceFilePath) fails.
- EXPECT_TRUE(submit<fail>(mClient1, 0, "", ""));
+ EXPECT_TRUE(mClient1->submit<fail>(0, "", ""));
+
+ // Test submit bad request (invalid client pid/uid) fails.
+ EXPECT_TRUE(mClient1->submit<fail>(0, "src", "dst", TranscodingJobPriority::kNormal, 1000000,
+ kInvalidClientPid, kInvalidClientUid));
// Test cancel non-existent job fails.
- EXPECT_TRUE(cancel<fail>(mClient1, 100));
+ EXPECT_TRUE(mClient1->cancel<fail>(100));
// Job 0 should start immediately and finish in 2 seconds, followed by Job 1 start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
// Test cancel valid jobId in random order.
// Test cancel finished job fails.
- EXPECT_TRUE(cancel(mClient1, 2));
- EXPECT_TRUE(cancel<fail>(mClient1, 0));
- EXPECT_TRUE(cancel(mClient1, 1));
+ EXPECT_TRUE(mClient1->cancel(2));
+ EXPECT_TRUE(mClient1->cancel<fail>(0));
+ EXPECT_TRUE(mClient1->cancel(1));
// Test cancel job again fails.
- EXPECT_TRUE(cancel<fail>(mClient1, 1));
+ EXPECT_TRUE(mClient1->cancel<fail>(1));
// Test no more events arriving after cancel.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::NoEvent);
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::NoEvent);
unregisterMultipleClients();
}
@@ -210,36 +202,36 @@
registerMultipleClients();
// Submit 3 requests.
- EXPECT_TRUE(submit(mClient1, 0, "test_source_file_0", "test_destination_file_0"));
- EXPECT_TRUE(submit(mClient1, 1, "test_source_file_1", "test_destination_file_1"));
- EXPECT_TRUE(submit(mClient1, 2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
// Test get jobs by id.
- EXPECT_TRUE(getJob(mClient1, 2, "test_source_file_2", "test_destination_file_2"));
- EXPECT_TRUE(getJob(mClient1, 1, "test_source_file_1", "test_destination_file_1"));
- EXPECT_TRUE(getJob(mClient1, 0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->getJob(2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->getJob(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->getJob(0, "test_source_file_0", "test_destination_file_0"));
// Test get job by invalid id fails.
- EXPECT_TRUE(getJob<fail>(mClient1, 100, "", ""));
- EXPECT_TRUE(getJob<fail>(mClient1, -1, "", ""));
+ EXPECT_TRUE(mClient1->getJob<fail>(100, "", ""));
+ EXPECT_TRUE(mClient1->getJob<fail>(-1, "", ""));
// Test get job after cancel fails.
- EXPECT_TRUE(cancel(mClient1, 2));
- EXPECT_TRUE(getJob<fail>(mClient1, 2, "", ""));
+ EXPECT_TRUE(mClient1->cancel(2));
+ EXPECT_TRUE(mClient1->getJob<fail>(2, "", ""));
// Job 0 should start immediately and finish in 2 seconds, followed by Job 1 start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
// Test get job after finish fails.
- EXPECT_TRUE(getJob<fail>(mClient1, 0, "", ""));
+ EXPECT_TRUE(mClient1->getJob<fail>(0, "", ""));
// Test get the remaining job 1.
- EXPECT_TRUE(getJob(mClient1, 1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->getJob(1, "test_source_file_1", "test_destination_file_1"));
// Cancel remaining job 1.
- EXPECT_TRUE(cancel(mClient1, 1));
+ EXPECT_TRUE(mClient1->cancel(1));
unregisterMultipleClients();
}
@@ -248,36 +240,36 @@
registerMultipleClients();
// Submit some offline jobs first.
- EXPECT_TRUE(submit(mClient1, 0, "test_source_file_0", "test_destination_file_0",
- TranscodingJobPriority::kUnspecified));
- EXPECT_TRUE(submit(mClient1, 1, "test_source_file_1", "test_destination_file_1",
- TranscodingJobPriority::kUnspecified));
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0",
+ TranscodingJobPriority::kUnspecified));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1",
+ TranscodingJobPriority::kUnspecified));
// Job 0 should start immediately.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
// Submit more real-time jobs.
- EXPECT_TRUE(submit(mClient1, 2, "test_source_file_2", "test_destination_file_2"));
- EXPECT_TRUE(submit(mClient1, 3, "test_source_file_3", "test_destination_file_3"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->submit(3, "test_source_file_3", "test_destination_file_3"));
// Job 0 should pause immediately and job 2 should start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
// Job 2 should finish in 2 seconds and job 3 should start.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 3));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 3));
// Cancel job 3 now
- EXPECT_TRUE(cancel(mClient1, 3));
+ EXPECT_TRUE(mClient1->cancel(3));
// Job 0 should resume and finish in 2 seconds, followed by job 1 start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
// Cancel remaining job 1.
- EXPECT_TRUE(cancel(mClient1, 1));
+ EXPECT_TRUE(mClient1->cancel(1));
unregisterMultipleClients();
}
@@ -286,8 +278,7 @@
std::shared_ptr<ITranscodingClient> client;
// Register a client, then unregister.
- Status status = mService->registerClient(mClientCallback1, kClientName, kClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &client);
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
EXPECT_TRUE(status.isOk());
status = client->unregister();
@@ -323,41 +314,41 @@
// Submit 3 requests.
ALOGD("Submitting job to client1 (app A) ...");
- EXPECT_TRUE(submit(mClient1, 0, "test_source_file_0", "test_destination_file_0"));
- EXPECT_TRUE(submit(mClient1, 1, "test_source_file_1", "test_destination_file_1"));
- EXPECT_TRUE(submit(mClient1, 2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
// Job 0 should start immediately.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
ALOGD("Moving app B to top...");
EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
// Job 0 should continue and finish in 2 seconds, then job 1 should start.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
ALOGD("Submitting job to client2 (app B) ...");
- EXPECT_TRUE(submit(mClient2, 0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient2->submit(0, "test_source_file_0", "test_destination_file_0"));
// Client1's job should pause, client2's job should start.
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 1));
- EXPECT_EQ(mClientCallback2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 1));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
ALOGD("Moving app A back to top...");
EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
// Client2's job should pause, client1's job 1 should resume.
- EXPECT_EQ(mClientCallback2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 1));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 1));
// Client1's job 1 should finish in 2 seconds, then its job 2 should start.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
- EXPECT_EQ(mClientCallback1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
// After client1's jobs finish, client2's job should resume.
- EXPECT_EQ(mClientCallback1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
- EXPECT_EQ(mClientCallback2->pop(kPaddingUs), EventTracker::Resume(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kJobWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Resume(CLIENT(2), 0));
unregisterMultipleClients();
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 9d9ca63..3ec8dea 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -198,7 +198,7 @@
for (const auto& serviceStream : streamsToClose) {
aaudio_handle_t handle = serviceStream->getHandle();
ALOGW("binderDied() close abandoned stream 0x%08X\n", handle);
- aaudioService->closeStream(handle);
+ aaudioService->asAAudioServiceInterface().closeStream(handle);
}
// mStreams should be empty now
}
diff --git a/services/oboeservice/AAudioClientTracker.h b/services/oboeservice/AAudioClientTracker.h
index 943b809..facfc3b 100644
--- a/services/oboeservice/AAudioClientTracker.h
+++ b/services/oboeservice/AAudioClientTracker.h
@@ -24,7 +24,7 @@
#include <utils/Singleton.h>
#include <aaudio/AAudio.h>
-#include "binding/IAAudioClient.h"
+#include <aaudio/IAAudioClient.h>
#include "AAudioService.h"
namespace aaudio {
@@ -46,7 +46,7 @@
*/
std::string dump() const;
- aaudio_result_t registerClient(pid_t pid, const android::sp<android::IAAudioClient>& client);
+ aaudio_result_t registerClient(pid_t pid, const android::sp<IAAudioClient>& client);
void unregisterClient(pid_t pid);
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 22cdb35..69e58f6 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -32,26 +32,26 @@
#include "AAudioService.h"
#include "AAudioServiceStreamMMAP.h"
#include "AAudioServiceStreamShared.h"
-#include "binding/IAAudioService.h"
using namespace android;
using namespace aaudio;
#define MAX_STREAMS_PER_PROCESS 8
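+// Stores an aaudio result code in the AIDL out-parameter and returns a successful binder
+// Status. Note: this expands to two statements, so only use it inside a braced block or as
+// the final statement of a method.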
+#define AIDL_RETURN(x) *_aidl_return = (x); return Status::ok();
+
using android::AAudioService;
+using binder::Status;
android::AAudioService::AAudioService()
- : BnAAudioService() {
+ : BnAAudioService(),
+ mAdapter(this) {
mAudioClient.clientUid = getuid(); // TODO consider using geteuid()
mAudioClient.clientPid = getpid();
mAudioClient.packageName = String16("");
AAudioClientTracker::getInstance().setAAudioService(this);
}
-AAudioService::~AAudioService() {
-}
-
status_t AAudioService::dump(int fd, const Vector<String16>& args) {
std::string result;
@@ -72,18 +72,21 @@
return NO_ERROR;
}
-void AAudioService::registerClient(const sp<IAAudioClient>& client) {
+Status AAudioService::registerClient(const sp<IAAudioClient> &client) {
pid_t pid = IPCThreadState::self()->getCallingPid();
AAudioClientTracker::getInstance().registerClient(pid, client);
+ return Status::ok();
}
-bool AAudioService::isCallerInService() {
- return mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
- mAudioClient.clientUid == IPCThreadState::self()->getCallingUid();
-}
+Status
+AAudioService::openStream(const StreamRequest &_request, StreamParameters* _paramsOut,
+ int32_t *_aidl_return) {
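+ // The generated AIDL signature passes the result as a plain int32_t out-parameter; this
+ // assert documents that aaudio_result_t is the same type, so AIDL_RETURN() is lossless.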
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
-aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) {
+ // Create wrapper objects for simple usage of the parcelables.
+ const AAudioStreamRequest request(_request);
+ AAudioStreamConfiguration paramsOut;
+
// A lock is used to order the opening of endpoints when an
// EXCLUSIVE endpoint is stolen. We want the order to be:
// 1) Thread A opens exclusive MMAP endpoint
@@ -108,13 +111,13 @@
if (count >= MAX_STREAMS_PER_PROCESS) {
ALOGE("openStream(): exceeded max streams per process %d >= %d",
count, MAX_STREAMS_PER_PROCESS);
- return AAUDIO_ERROR_UNAVAILABLE;
+ AIDL_RETURN(AAUDIO_ERROR_UNAVAILABLE);
}
}
if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
ALOGE("openStream(): unrecognized sharing mode = %d", sharingMode);
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ AIDL_RETURN(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
}
if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE
@@ -147,29 +150,124 @@
if (result != AAUDIO_OK) {
serviceStream.clear();
- return result;
+ AIDL_RETURN(result);
} else {
aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
serviceStream->setHandle(handle);
pid_t pid = request.getProcessId();
AAudioClientTracker::getInstance().registerClientStream(pid, serviceStream);
- configurationOutput.copyFrom(*serviceStream);
+ paramsOut.copyFrom(*serviceStream);
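+ // Convert the wrapper back into the stable AIDL parcelable for the out-parameter.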
+ *_paramsOut = std::move(paramsOut).parcelable();
// Log open in MediaMetrics after we have the handle because we need the handle to
// create the metrics ID.
serviceStream->logOpen(handle);
ALOGV("%s(): return handle = 0x%08X", __func__, handle);
- return handle;
+ AIDL_RETURN(handle);
}
}
-aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+Status AAudioService::closeStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("closeStream(0x%0x), illegal stream handle", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
}
- return closeStream(serviceStream);
+ AIDL_RETURN(closeStream(serviceStream));
+}
+
+Status AAudioService::getStreamDescription(int32_t streamHandle, Endpoint* endpoint,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AudioEndpointParcelable endpointParcelable;
+ aaudio_result_t result = serviceStream->getDescription(endpointParcelable);
+ if (result == AAUDIO_OK) {
+ *endpoint = std::move(endpointParcelable).parcelable();
+ }
+ AIDL_RETURN(result);
+}
+
+Status AAudioService::startStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->start());
+}
+
+Status AAudioService::pauseStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->pause());
+}
+
+Status AAudioService::stopStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->stop());
+}
+
+Status AAudioService::flushStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->flush());
+}
+
+Status AAudioService::registerAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int64_t periodNanoseconds, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
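+ // Threads registered from within the audioserver get the service real-time priority;
+ // threads registered by app clients get the client priority.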
+ int32_t priority = isCallerInService()
+ ? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
+ AIDL_RETURN(serviceStream->registerAudioThread(clientThreadId, priority));
+}
+
+Status AAudioService::unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->unregisterAudioThread(clientThreadId));
+}
+
+bool AAudioService::isCallerInService() {
+ return mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
+ mAudioClient.clientUid == IPCThreadState::self()->getCallingUid();
}
aaudio_result_t AAudioService::closeStream(sp<AAudioServiceStreamBase> serviceStream) {
@@ -205,76 +303,6 @@
return serviceStream;
}
-aaudio_result_t AAudioService::getStreamDescription(
- aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->getDescription(parcelable);
-}
-
-aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->start();
-}
-
-aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->pause();
-}
-
-aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->stop();
-}
-
-aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->flush();
-}
-
-aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t /* periodNanoseconds */) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- int32_t priority = isCallerInService()
- ? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
- return serviceStream->registerAudioThread(clientThreadId, priority);
-}
-
-aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->unregisterAudioThread(clientThreadId);
-}
-
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
const android::AudioClient& client,
const audio_attributes_t *attr,
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index caf48a5..7c1b796 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -24,69 +24,71 @@
#include <media/AudioClient.h>
#include <aaudio/AAudio.h>
+#include <aaudio/BnAAudioService.h>
#include "binding/AAudioCommon.h"
+#include "binding/AAudioBinderAdapter.h"
#include "binding/AAudioServiceInterface.h"
-#include "binding/IAAudioService.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioStreamTracker.h"
namespace android {
+#define AAUDIO_SERVICE_NAME "media.aaudio"
+
class AAudioService :
public BinderService<AAudioService>,
- public BnAAudioService,
- public aaudio::AAudioServiceInterface
+ public aaudio::BnAAudioService
{
friend class BinderService<AAudioService>;
public:
AAudioService();
- virtual ~AAudioService();
+ virtual ~AAudioService() = default;
+
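+ // In-process view of this service as the legacy AAudioServiceInterface, used by
+ // components such as AAudioClientTracker and the shared service endpoints.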
+ aaudio::AAudioServiceInterface& asAAudioServiceInterface() {
+ return mAdapter;
+ }
static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
virtual status_t dump(int fd, const Vector<String16>& args) override;
- virtual void registerClient(const sp<IAAudioClient>& client);
+ binder::Status registerClient(const ::android::sp<::aaudio::IAAudioClient>& client) override;
- aaudio::aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput)
- override;
+ binder::Status openStream(const ::aaudio::StreamRequest& request,
+ ::aaudio::StreamParameters* paramsOut,
+ int32_t* _aidl_return) override;
- /*
- * This is called from Binder. It checks for permissions
- * and converts the handle passed through Binder to a stream pointer.
- */
- aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status closeStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t getStreamDescription(
- aaudio::aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) override;
+ binder::Status
+ getStreamDescription(int32_t streamHandle, ::aaudio::Endpoint* endpoint,
+ int32_t* _aidl_return) override;
- aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status startStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status pauseStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status stopStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status flushStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t tid,
- int64_t periodNanoseconds) override;
+ binder::Status
+ registerAudioThread(int32_t streamHandle, int32_t clientThreadId, int64_t periodNanoseconds,
+ int32_t* _aidl_return) override;
- aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t tid) override;
+ binder::Status unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int32_t* _aidl_return) override;
aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
const android::AudioClient& client,
const audio_attributes_t *attr,
- audio_port_handle_t *clientHandle) override;
+ audio_port_handle_t *clientHandle);
aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
- audio_port_handle_t clientHandle) override;
+ audio_port_handle_t clientHandle);
// ===============================================================================
// The following public methods are only called from the service and NOT by Binder.
@@ -101,6 +103,29 @@
aaudio_result_t closeStream(sp<aaudio::AAudioServiceStreamBase> serviceStream);
private:
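+ // Bridges the legacy AAudioServiceInterface back onto this service's AIDL methods so
+ // in-process callers keep the old synchronous API. startClient()/stopClient() are not
+ // part of the AIDL interface, so they are forwarded to this service directly.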
+ class Adapter : public aaudio::AAudioBinderAdapter {
+ public:
+ explicit Adapter(AAudioService *service)
+ : aaudio::AAudioBinderAdapter(service),
+ mService(service) {}
+
+ aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
+ const android::AudioClient &client,
+ const audio_attributes_t *attr,
+ audio_port_handle_t *clientHandle) override {
+ return mService->startClient(streamHandle, client, attr, clientHandle);
+ }
+
+ aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
+ audio_port_handle_t clientHandle) override {
+ return mService->stopClient(streamHandle, clientHandle);
+ }
+
+ private:
+ AAudioService* const mService;
+ };
+
+ Adapter mAdapter;
/** @return true if the client is the audioserver
*/
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 206a264..b86fe9d 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -35,9 +35,9 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
-AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
- : AAudioServiceEndpointShared(
- (AudioStreamInternal *)(new AudioStreamInternalCapture(audioService, true))) {
+AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService& audioService)
+ : AAudioServiceEndpointShared(
+ new AudioStreamInternalCapture(audioService.asAAudioServiceInterface(), true)) {
}
aaudio_result_t AAudioServiceEndpointCapture::open(const aaudio::AAudioStreamRequest &request) {
@@ -65,7 +65,7 @@
result = getStreamInternal()->read(mDistributionBuffer.get(),
getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
- disconnectRegisteredStreams();
+ ALOGV("%s() read() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
break;
} else if (result != getFramesPerBurst()) {
ALOGW("callbackLoop() read %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 730d161..53cb70b 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -41,10 +41,9 @@
#define BURSTS_PER_BUFFER_DEFAULT 2
-AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
- : AAudioServiceEndpointShared(
- (AudioStreamInternal *)(new AudioStreamInternalPlay(audioService, true))) {
-}
+AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService& audioService)
+ : AAudioServiceEndpointShared(
+ new AudioStreamInternalPlay(audioService.asAAudioServiceInterface(), true)) {}
aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAudioServiceEndpointShared::open(request);
@@ -146,7 +145,7 @@
result = getStreamInternal()->write(mMixer.getOutputBuffer(),
getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
- AAudioServiceEndpointShared::disconnectRegisteredStreams();
+ ALOGV("%s() write() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
break;
} else if (result != getFramesPerBurst()) {
ALOGW("callbackLoop() wrote %d / %d",
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index ea691cf..9736091 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -26,7 +26,6 @@
#include <media/TypeConverter.h>
#include <mediautils/SchedulingPolicyService.h>
-#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "utility/AudioClock.h"
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 51c26e9..f9efc2a 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -24,9 +24,10 @@
#include <utils/RefBase.h>
#include "fifo/FifoBuffer.h"
-#include "binding/IAAudioService.h"
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceMessage.h"
+#include "binding/AAudioStreamRequest.h"
+#include "core/AAudioStreamParameters.h"
#include "utility/AAudioUtilities.h"
#include "utility/AudioClock.h"
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index e88a81e..031468e 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -24,8 +24,6 @@
#include <aaudio/AAudio.h>
-#include "binding/IAAudioService.h"
-
#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioServiceStreamShared.h"
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 8b1e2c0..31e590e 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -55,6 +55,11 @@
"libcutils",
"liblog",
"libutils",
+ "aaudio-aidl-cpp",
+ ],
+
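+ // The exported headers of this library include binding/ headers from libaaudio_internal,
+ // so re-export them to modules that depend on this library.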
+ export_shared_lib_headers: [
+ "libaaudio_internal",
],
header_libs: [